Skip to content
This repository has been archived by the owner on Jan 11, 2024. It is now read-only.

Topdown finality proposal and execution #310

Merged
merged 53 commits into from
Oct 25, 2023
Merged
Show file tree
Hide file tree
Changes from 16 commits
Commits
Show all changes
53 commits
Select commit Hold shift + click to select a range
5f13dbf
migrate top down crate
Oct 4, 2023
5be01f1
top down crate
Oct 4, 2023
c097c33
new ipc changes
Oct 8, 2023
84c0a7f
export proxy mod
Oct 8, 2023
1ce66f6
fix tests
Oct 8, 2023
e0e248f
update cargo
Oct 9, 2023
6d553f1
update convert
Oct 9, 2023
f84d37f
stashed
Oct 9, 2023
4f0d20e
stashed
Oct 9, 2023
f94fb64
initial impl
Oct 9, 2023
e648c2c
update cargo
Oct 9, 2023
62a46c5
lint
Oct 10, 2023
20c267d
merge with upstream
Oct 10, 2023
0672f98
update tests
Oct 10, 2023
1d75966
execute top down
Oct 10, 2023
f5d2eb7
format code
Oct 10, 2023
02ee4a7
Merge branch 'main' into topdown
adlrocha Oct 12, 2023
4d71552
fm-303: update ipc sdk deps
adlrocha Oct 12, 2023
9daa1b3
ipc-308: address review
adlrocha Oct 12, 2023
92ecf89
Merge branch 'main' into topdown
adlrocha Oct 12, 2023
368162e
ipc-308: add additional check for reorgs
adlrocha Oct 12, 2023
6cad105
ipc-308: revert additional reorg check and add todo
adlrocha Oct 12, 2023
8e5f151
detect chain reorg
Oct 13, 2023
9a1b23a
temp changes
Oct 13, 2023
880e2ee
update reorg
Oct 13, 2023
35a3a2d
fix tests
Oct 15, 2023
511650d
partial changes
Oct 15, 2023
e34371b
fix review
Oct 16, 2023
3b1d687
resolve conflicts
Oct 16, 2023
4b36abe
move get finality
Oct 16, 2023
703a85a
more logging
Oct 18, 2023
a4566cf
fix getting genesis epoch
Oct 18, 2023
8e33af7
update config
Oct 18, 2023
1face0c
more logging
Oct 18, 2023
aa30534
more logs
Oct 18, 2023
f088d50
update cargo lock
Oct 18, 2023
9fb60e4
update config
Oct 18, 2023
2c31f16
remove network name
Oct 18, 2023
20c8991
more logs
Oct 18, 2023
33f0be9
update cargo
Oct 18, 2023
df2e2d8
fix top down error
Oct 18, 2023
ffb4301
more logs
Oct 18, 2023
22082c1
fix prev hash
Oct 18, 2023
aa4b4b5
revert toolchain
Oct 18, 2023
3734664
implicit execution
Oct 18, 2023
dba7183
remove context
Oct 18, 2023
dc60f57
implicit execution
Oct 18, 2023
bbb307a
more logging
Oct 18, 2023
f03613b
more logging
Oct 18, 2023
9880805
merge with main
Oct 23, 2023
6743aec
address review
adlrocha Oct 24, 2023
c95fa24
Fix topdown (#321)
cryptoAtwill Oct 25, 2023
4b7e311
merge with main
Oct 25, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
884 changes: 596 additions & 288 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,7 @@ tendermint-proto = { version = "0.31" }
ipc-sdk = { git = "https://github.com/consensus-shipyard/ipc-agent.git", branch = "dev" }
ipc_ipld_resolver = { git = "https://github.com/consensus-shipyard/ipc-ipld-resolver.git", branch = "main" }
ipc_actors_abis = { git = "https://github.com/consensus-shipyard/ipc-solidity-actors.git", branch = "dev" }
ipc-provider = { git = "https://github.com/consensus-shipyard/ipc-agent.git", branch = "dev" }

[patch.crates-io]
# Use stable-only features.
Expand Down
2 changes: 2 additions & 0 deletions fendermint/app/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -48,13 +48,15 @@ fendermint_vm_genesis = { path = "../vm/genesis" }
fendermint_vm_interpreter = { path = "../vm/interpreter", features = ["bundle"] }
fendermint_vm_message = { path = "../vm/message" }
fendermint_vm_resolver = { path = "../vm/resolver" }
fendermint_vm_topdown = { path = "../vm/topdown" }

fvm = { workspace = true }
fvm_ipld_blockstore = { workspace = true }
fvm_ipld_car = { workspace = true }
fvm_ipld_encoding = { workspace = true }
fvm_shared = { workspace = true }
ipc-sdk = { workspace = true }
ipc-provider = { workspace = true }
ipc_ipld_resolver = { workspace = true }

[dev-dependencies]
Expand Down
5 changes: 5 additions & 0 deletions fendermint/app/config/default.toml
Original file line number Diff line number Diff line change
Expand Up @@ -168,3 +168,8 @@ event_buffer_capacity = 100
rate_limit_bytes = 0
# Length of the time period at which the consumption limit fills. 0 means no limit.
rate_limit_period = 0

# IPC related configuration parameters
[ipc]
subnet_id = "/r0"
network_name = "dev"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Wouldn't you need to add here the settings for top-down too?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What is a network_name? IIRC @dnkolegov said it's the same as the subnet ID.
Sorry I can't keep it in my head 😞

2 changes: 2 additions & 0 deletions fendermint/app/settings/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,5 +21,7 @@ tendermint-rpc = { workspace = true }
fvm_shared = { workspace = true }
fvm_ipld_encoding = { workspace = true }
ipc-sdk = { workspace = true }
ipc-provider = { workspace = true }

fendermint_vm_encoding = { path = "../../vm/encoding" }
fendermint_vm_topdown = { path = "../../vm/topdown" }
53 changes: 52 additions & 1 deletion fendermint/app/settings/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,19 +1,23 @@
// Copyright 2022-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT

use anyhow::Context;
use anyhow::{anyhow, Context};
use config::{Config, ConfigError, Environment, File};
use fvm_shared::address::Address;
use fvm_shared::econ::TokenAmount;
use ipc_sdk::subnet_id::SubnetID;
use serde::Deserialize;
use serde_with::serde_as;
use std::path::{Path, PathBuf};
use tendermint_rpc::Url;

use fendermint_vm_encoding::{human_readable_delegate, human_readable_str};
use fendermint_vm_topdown::BlockHeight;

use self::eth::EthSettings;
use self::fvm::FvmSettings;
use self::resolver::ResolverSettings;
use ipc_provider::config::deserialize::deserialize_eth_address_from_str;

pub mod eth;
pub mod fvm;
Expand Down Expand Up @@ -93,6 +97,52 @@ pub struct BroadcastSettings {
pub max_retries: u8,
}

#[derive(Debug, Deserialize, Clone)]
pub struct TopDownConfig {
/// The number of blocks to delay before reporting a height as final on the parent chain.
/// Proposing a height a few epochs behind the latest one keeps the proposal conservative
/// and avoids others rejecting it because they do not yet see that height as final.
pub chain_head_delay: BlockHeight,
/// Parent syncing cron period, in seconds.
pub polling_interval_secs: u64,
aakoshh marked this conversation as resolved.
Show resolved Hide resolved
/// Base, in seconds, for the exponential back-off used when retrying top down queries.
pub exponential_back_off_secs: u64,
/// The max number of retries for exponential backoff before giving up.
pub exponential_retry_limit: usize,
/// The IPC provider URL; used as the HTTP endpoint for querying the parent subnet.
/// NOTE(review): this is the parent's endpoint, not the whole provider — a name like
/// `parent_endpoint` was suggested.
pub ipc_provider_url: String,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be the endpoint for the parent, right? Not for the whole ipc provider. This is confusing.

Suggested change
pub ipc_provider_url: String,
pub parent_endpoint: String,

/// The registry contract address on the parent, deserialized from an ETH-style
/// address string (the registry has no stable address on the parent, hence config).
#[serde(deserialize_with = "deserialize_eth_address_from_str")]
pub registry_address: Address,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe it i a good idea to make it more explicit?

Suggested change
pub registry_address: Address,
pub parent_registry: Address,

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The gateway and the registry do not have stable addresses on Lotus, right?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Correct, that is why we need to offer a config for that (for L2+ subnets this will be easier as we can point to the deterministic address by Fendermint)

/// The gateway contract address on the parent, deserialized from an ETH-style
/// address string.
#[serde(deserialize_with = "deserialize_eth_address_from_str")]
pub gateway_address: Address,
aakoshh marked this conversation as resolved.
Show resolved Hide resolved
}

/// IPC (InterPlanetary Consensus) related settings.
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct IPCSettings {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
pub struct IPCSettings {
pub struct IpcSettings {

So that we match existing capitalization here and [here](https://github.com/consensus-shipyard/fendermint/blob/main/fendermint/vm/genesis/src/lib.rs#L152] for example.

/// The ID of the subnet this node participates in, in human readable form.
#[serde_as(as = "IsHumanReadable")]
pub subnet_id: SubnetID,
/// Name of the IPC network.
/// NOTE(review): possibly the same as the subnet ID — confirm whether it is redundant.
pub network_name: String,
/// The config for top down checkpointing. It is `None` if the subnet is the root or if
/// top down checkpoint related operations are not activated.
pub topdown: Option<TopDownConfig>,
}

impl IPCSettings {
    /// Top-down finality is active only for non-root subnets that carry a
    /// top-down configuration section.
    pub fn is_topdown_enabled(&self) -> bool {
        let has_topdown = self.topdown.is_some();
        !self.subnet_id.is_root() && has_topdown
    }

    /// Borrow the top-down configuration, failing when the section is absent.
    pub fn topdown_config(&self) -> anyhow::Result<&TopDownConfig> {
        match self.topdown {
            Some(ref config) => Ok(config),
            None => Err(anyhow!("top down config missing")),
        }
    }
}

#[derive(Debug, Deserialize, Clone)]
pub struct Settings {
/// Home directory configured on the CLI, to which all paths in settings can be set relative.
Expand All @@ -115,6 +165,7 @@ pub struct Settings {
pub fvm: FvmSettings,
pub resolver: ResolverSettings,
pub broadcast: BroadcastSettings,
pub ipc: IPCSettings,
}

#[macro_export]
Expand Down
75 changes: 67 additions & 8 deletions fendermint/app/src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,9 @@ use fendermint_vm_genesis::Validator;
use fendermint_vm_interpreter::bytes::{
BytesMessageApplyRes, BytesMessageCheckRes, BytesMessageQuery, BytesMessageQueryRes,
};
use fendermint_vm_interpreter::chain::{ChainMessageApplyRet, CheckpointPool, IllegalMessage};
use fendermint_vm_interpreter::chain::{
ChainMessageApplyRet, CheckpointPool, IllegalMessage, TopDownFinalityProvider,
};
use fendermint_vm_interpreter::fvm::state::{
empty_state_tree, CheckStateRef, FvmExecState, FvmGenesisState, FvmQueryState, FvmStateParams,
};
Expand All @@ -31,6 +33,7 @@ use fendermint_vm_message::query::FvmQueryHeight;
use fvm::engine::MultiEngine;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::chainid::ChainID;
use fvm_shared::clock::ChainEpoch;
use fvm_shared::econ::TokenAmount;
use fvm_shared::version::NetworkVersion;
use num_traits::Zero;
Expand Down Expand Up @@ -136,6 +139,8 @@ where
interpreter: Arc<I>,
/// CID resolution pool.
resolve_pool: CheckpointPool,
/// The parent finality provider for top down checkpoint
parent_finality_provider: TopDownFinalityProvider,
/// State accumulating changes during block execution.
exec_state: Arc<Mutex<Option<FvmExecState<SS>>>>,
/// Projected (partial) state accumulating during transaction checks.
Expand All @@ -162,6 +167,7 @@ where
state_store: SS,
interpreter: I,
resolve_pool: CheckpointPool,
parent_finality_provider: TopDownFinalityProvider,
) -> Result<Self> {
let app = Self {
db: Arc::new(db),
Expand All @@ -173,6 +179,7 @@ where
state_hist_size: config.state_hist_size,
interpreter: Arc::new(interpreter),
resolve_pool,
parent_finality_provider,
exec_state: Arc::new(Mutex::new(None)),
check_state: Arc::new(tokio::sync::Mutex::new(None)),
};
Expand Down Expand Up @@ -276,15 +283,55 @@ where
/// Take the execution state, update it, put it back, return the output.
async fn modify_exec_state<T, F, R>(&self, f: F) -> Result<T>
where
F: FnOnce((CheckpointPool, FvmExecState<SS>)) -> R,
R: Future<Output = Result<((CheckpointPool, FvmExecState<SS>), T)>>,
F: FnOnce((CheckpointPool, TopDownFinalityProvider, FvmExecState<SS>)) -> R,
R: Future<
Output = Result<(
(CheckpointPool, TopDownFinalityProvider, FvmExecState<SS>),
T,
)>,
>,
{
let state = self.take_exec_state();
let ((_pool, state), ret) = f((self.resolve_pool.clone(), state)).await?;
let ((_pool, _provider, state), ret) = f((
self.resolve_pool.clone(),
self.parent_finality_provider.clone(),
state,
))
.await?;
self.put_exec_state(state);
Ok(ret)
}

/// Get a read only fvm execution state. This is useful to perform query commands targeting
/// the latest state.
///
/// Returns `Ok(None)` while no block has been committed yet (height 0), since there is no
/// state to query until block production starts.
pub fn new_read_only_exec_state(
adlrocha marked this conversation as resolved.
Show resolved Hide resolved
&self,
) -> Result<Option<FvmExecState<ReadOnlyBlockstore<Arc<SS>>>>> {
let maybe_app_state = self.get_committed_state()?;

Ok(if let Some(app_state) = maybe_app_state {
let block_height = app_state.block_height;
let state_params = app_state.state_params;

// Height 0 means no block has been committed yet; wait for block production
// before exposing a queryable state.
if block_height == 0 {
return Ok(None);
}

// The blockstore is wrapped as read-only so queries cannot mutate state.
let exec_state = FvmExecState::new(
ReadOnlyBlockstore::new(self.state_store.clone()),
self.multi_engine.as_ref(),
// NOTE(review): unsigned-to-ChainEpoch cast; assumes the height fits — confirm.
block_height as ChainEpoch,
state_params,
)
.context("error creating execution state")?;

Some(exec_state)
} else {
None
})
}

/// Look up a past state at a particular height Tendermint Core is looking for.
///
/// A height of zero means we are looking for the latest state.
Expand Down Expand Up @@ -334,9 +381,9 @@ where
Genesis = Vec<u8>,
Output = FvmGenesisOutput,
>,
I: ProposalInterpreter<State = CheckpointPool, Message = Vec<u8>>,
I: ProposalInterpreter<State = (CheckpointPool, TopDownFinalityProvider), Message = Vec<u8>>,
I: ExecInterpreter<
State = (CheckpointPool, FvmExecState<SS>),
State = (CheckpointPool, TopDownFinalityProvider, FvmExecState<SS>),
Message = Vec<u8>,
BeginOutput = FvmApplyRet,
DeliverOutput = BytesMessageApplyRes,
Expand Down Expand Up @@ -531,7 +578,13 @@ where

let txs = self
.interpreter
.prepare(self.resolve_pool.clone(), txs)
.prepare(
(
self.resolve_pool.clone(),
self.parent_finality_provider.clone(),
),
txs,
)
.await
.context("failed to prepare proposal")?;

Expand All @@ -550,7 +603,13 @@ where

let accept = self
.interpreter
.process(self.resolve_pool.clone(), txs)
.process(
(
self.resolve_pool.clone(),
self.parent_finality_provider.clone(),
),
txs,
)
.await
.context("failed to process proposal")?;

Expand Down
71 changes: 70 additions & 1 deletion fendermint/app/src/cmd/run.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

use anyhow::{anyhow, bail, Context};
use fendermint_abci::ApplicationService;
use fendermint_app::{App, AppConfig, AppStore, BitswapBlockstore};
use fendermint_app::{App, AppConfig, AppParentFinalityQuery, AppStore, BitswapBlockstore};
use fendermint_app_settings::AccountKind;
use fendermint_crypto::SecretKey;
use fendermint_rocksdb::{blockstore::NamespaceBlockstore, namespaces, RocksDb, RocksDbConfig};
Expand All @@ -15,14 +15,45 @@ use fendermint_vm_interpreter::{
signed::SignedMessageInterpreter,
};
use fendermint_vm_resolver::ipld::IpldResolver;
use fendermint_vm_topdown::proxy::IPCProviderProxy;
use fendermint_vm_topdown::sync::launch_polling_syncer;
use fendermint_vm_topdown::{CachedFinalityProvider, Toggle};
use fvm_shared::address::Address;
use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig};
use ipc_provider::IpcProvider;
use libp2p::identity::secp256k1;
use libp2p::identity::Keypair;
use std::sync::Arc;
use tracing::info;

use crate::cmd::key::read_secret_key;
use crate::{cmd, options::run::RunArgs, settings::Settings};

/// Build an `IPCProviderProxy` from the top-down settings, pointing the underlying
/// `IpcProvider` at the parent endpoint together with the parent registry and gateway
/// contract addresses.
///
/// Fails if the top-down config section is missing or the provider URL does not parse.
fn create_ipc_provider_proxy(settings: &Settings) -> anyhow::Result<IPCProviderProxy> {
let topdown_config = settings.ipc.topdown_config()?;

// NOTE(review): the error message still says "agent" although the IPC agent is gone;
// parsing could also move into the settings layer using a `Url` type instead of `String`.
let url = topdown_config
.ipc_provider_url
.parse()
.context("invalid agent URL")?;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So, no more agent?

It would be nice to move this parsing into the settings and use a Url type instead of String, to catch problems early. There is a type in tendermint_rpc we already use here, perhaps it can do the trick.


let ipc_provider = IpcProvider::new_with_subnet(
None,
ipc_provider::config::Subnet {
id: settings.ipc.subnet_id.clone(),
network_name: settings.ipc.network_name.clone(),
config: SubnetConfig::Fevm(EVMSubnet {
provider_http: url,
auth_token: None, // no auth token for the parent endpoint
registry_addr: topdown_config.registry_address,
gateway_addr: topdown_config.gateway_address,
accounts: vec![], // assumes no accounts are needed for top down queries — TODO confirm
}),
},
)?;
// Wrap the provider in a proxy bound to this node's subnet ID.
IPCProviderProxy::new(ipc_provider, settings.ipc.subnet_id.clone())
}

cmd! {
RunArgs(self, settings) {
run(settings).await
Expand Down Expand Up @@ -123,6 +154,26 @@ async fn run(settings: Settings) -> anyhow::Result<()> {
tracing::info!("IPLD Resolver disabled.")
}

let (parent_finality_provider, ipc_tuple) = if settings.ipc.is_topdown_enabled() {
info!("topdown finality enabled");
let topdown_config = settings.ipc.topdown_config()?;
let config = fendermint_vm_topdown::Config {
chain_head_delay: topdown_config.chain_head_delay,
polling_interval_secs: topdown_config.polling_interval_secs,
exponential_back_off_secs: topdown_config.exponential_back_off_secs,
exponential_retry_limit: topdown_config.exponential_retry_limit,
};
let agent_proxy = Arc::new(create_ipc_provider_proxy(&settings)?);
aakoshh marked this conversation as resolved.
Show resolved Hide resolved
let p = Arc::new(Toggle::enabled(CachedFinalityProvider::uninitialized(
config.clone(),
agent_proxy.clone(),
)));
(p, Some((agent_proxy, config)))
} else {
info!("topdown finality disabled");
(Arc::new(Toggle::disabled()), None)
};

let app: App<_, _, AppStore, _> = App::new(
AppConfig {
app_namespace: ns.app,
Expand All @@ -134,8 +185,26 @@ async fn run(settings: Settings) -> anyhow::Result<()> {
state_store,
interpreter,
resolve_pool,
parent_finality_provider.clone(),
)?;

if let Some((agent_proxy, config)) = ipc_tuple {
let app_parent_finality_query = AppParentFinalityQuery::new(app.clone());
tokio::spawn(async move {
match launch_polling_syncer(
app_parent_finality_query,
config,
parent_finality_provider,
agent_proxy,
)
.await
{
Ok(_) => {}
Err(e) => tracing::error!("cannot launch polling syncer: {e}"),
}
});
}

let service = ApplicationService(app);

// Split it into components.
Expand Down
Loading
Loading