diff --git a/.env b/.env index 128a6994d7..6930223567 100644 --- a/.env +++ b/.env @@ -35,6 +35,7 @@ ESPRESSO_SEQUENCER_MAX_CONNECTIONS=25 ESPRESSO_SEQUENCER_STORAGE_PATH=/store/sequencer ESPRESSO_SEQUENCER_GENESIS_FILE=/genesis/demo.toml ESPRESSO_SEQUENCER_L1_PORT=8545 +ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL=100ms ESPRESSO_SEQUENCER_L1_WS_PORT=8546 ESPRESSO_SEQUENCER_L1_PROVIDER=http://demo-l1-network:${ESPRESSO_SEQUENCER_L1_PORT} ESPRESSO_SEQUENCER_L1_WS_PROVIDER=ws://demo-l1-network:${ESPRESSO_SEQUENCER_L1_WS_PORT} @@ -57,9 +58,10 @@ ESPRESSO_BUILDER_ETH_ACCOUNT_INDEX=8 ESPRESSO_DEPLOYER_ACCOUNT_INDEX=9 # Contracts -ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS=0x0c8e79f3534b00d9a3d4a856b665bf4ebc22f2ba +ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS=0xf7cd8fa9b94db2aa972023b379c7f72c65e4de9d ESPRESSO_SEQUENCER_LIGHTCLIENT_ADDRESS=$ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS ESPRESSO_SEQUENCER_PERMISSIONED_PROVER=0x14dc79964da2c08b23698b3d3cc7ca32193d9955 +SPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS=0x8ce361602b935680e8dec218b820ff5056beb7af # Example sequencer demo private keys ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_0=BLS_SIGNING_KEY~lNDh4Pn-pTAyzyprOAFdXHwhrKhEwqwtMtkD3CZF4x3o @@ -149,5 +151,5 @@ INTEGRATION_TEST_PROTO=http # `03`, marketplace upgrade will be tested. INTEGRATION_TEST_SEQUENCER_VERSION=02 -# max database connections -ESPRESSO_SEQUENCER_DATABASE_MAX_CONNECTIONS=25 \ No newline at end of file +# max database connections +ESPRESSO_SEQUENCER_DATABASE_MAX_CONNECTIONS=25 diff --git a/.typos.toml b/.typos.toml index 48ac32a608..01a0775fb3 100644 --- a/.typos.toml +++ b/.typos.toml @@ -1,7 +1,9 @@ [files] extend-exclude = [ + "data/initial_stake_table.toml", ".env", "*.json", + "**/*.pdf", "doc/*.svg", "doc/*.puml", "contracts/lib", diff --git a/Cargo.lock b/Cargo.lock index 7276550596..ef86322713 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2834,15 +2834,17 @@ dependencies = [ "committable", "contract-bindings", "derive_more 1.0.0", - "dyn-clone", + "diff-test-bn254", "ethers", "fluent-asserter", "futures", "hotshot", + "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", "itertools 0.12.1", "jf-merkle-tree", + "jf-signature 0.2.0", "jf-utils", "jf-vid", "lru 0.12.5", @@ -4082,11 +4084,14 @@ version = "0.1.0" dependencies = [ "anyhow", "ark-bn254", + "ark-ec", + "ark-ed-on-bn254", "ark-ff", "ark-poly", "ark-serialize", "ark-std", "contract-bindings", + "derive_more 1.0.0", "diff-test-bn254", "ethers", "hotshot-types", @@ -4096,6 +4101,7 @@ dependencies = [ "libp2p", "num-bigint", "num-traits", + "serde", ] [[package]] @@ -7413,6 +7419,17 @@ dependencies = [ "uint", ] +[[package]] +name = "priority-queue" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714c75db297bc88a63783ffc6ab9f830698a6705aa0201416931759ef4c8183d" +dependencies = [ + "autocfg", + "equivalent", + "indexmap 2.6.0", +] + [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -8595,7 +8612,7 @@ dependencies = [ "anyhow", "ark-ff", "ark-serialize", - "async-broadcast", + "async-channel 2.3.1", "async-lock 3.4.0", "async-once-cell", "async-trait", @@ -8610,7 +8627,6 @@ dependencies = [ "derivative", "derive_more 1.0.0", "dotenvy", - "dyn-clone", "escargot", "espresso-macros", "espresso-types", @@ -8642,6 +8658,7 @@ dependencies = [ "parking_lot", "portpicker", "pretty_assertions", + "priority-queue", "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", @@ -8686,6 +8703,7 @@ dependencies = [ "futures", "hotshot", 
"hotshot-contract-adapter", + "hotshot-types", "log-panics", "portpicker", "reqwest 0.11.27", @@ -8694,6 +8712,7 @@ dependencies = [ "surf", "tempfile", "tokio", + "toml 0.8.19", "tracing", "url", ] diff --git a/Cargo.toml b/Cargo.toml index 011701805d..630d32d085 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ ark-poly = "0.4" ark-serialize = "0.4" ark-srs = "0.3.1" async-broadcast = "0.7.0" +async-channel = "2" async-lock = "3" async-once-cell = "0.5" async-trait = "0.1" @@ -54,7 +55,6 @@ cld = "0.5" derive_more = { version = "1.0", features = ["full"] } es-version = { git = "https://github.com/EspressoSystems/es-version.git", branch = "main" } dotenvy = "0.15" -dyn-clone = "1.0" ethers = { version = "2.0", features = ["solc", "ws"] } futures = "0.3" tokio = { version = "1", default-features = false, features = [ @@ -125,6 +125,7 @@ thiserror = "1.0.69" tracing = "0.1" bytesize = "1.3" itertools = "0.12" +priority-queue = "2" rand_chacha = "0.3" rand_distr = "0.4" reqwest = "0.12" diff --git a/builder/src/non_permissioned.rs b/builder/src/non_permissioned.rs index ab33c1fb35..e07509d6d9 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -19,6 +19,7 @@ use hotshot_types::{ data::{fake_commitment, ViewNumber}, traits::{ block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, + metrics::NoMetrics, node_implementation::Versions, EncodeBytes, }, @@ -53,6 +54,7 @@ pub async fn build_instance_state( Arc::new(StatePeers::::from_urls( state_peers, Default::default(), + &NoMetrics, )), V::Base::VERSION, ); diff --git a/contract-bindings/src/lib.rs b/contract-bindings/src/lib.rs index cc6716e894..ee76dcf578 100644 --- a/contract-bindings/src/lib.rs +++ b/contract-bindings/src/lib.rs @@ -10,6 +10,7 @@ pub mod light_client; pub mod light_client_mock; pub mod light_client_state_update_vk; pub mod light_client_state_update_vk_mock; +pub mod permissioned_stake_table; pub mod plonk_verifier; pub mod plonk_verifier_2; pub mod shared_types; diff --git a/contract-bindings/src/permissioned_stake_table.rs b/contract-bindings/src/permissioned_stake_table.rs new file mode 100644 index 0000000000..416ad97bdf --- /dev/null +++ b/contract-bindings/src/permissioned_stake_table.rs @@ -0,0 +1,1169 @@ +pub use permissioned_stake_table::*; +/// This module was auto-generated with ethers-rs Abigen. 
+/// More information at: +#[allow( + clippy::enum_variant_names, + clippy::too_many_arguments, + clippy::upper_case_acronyms, + clippy::type_complexity, + dead_code, + non_camel_case_types +)] +pub mod permissioned_stake_table { + #[allow(deprecated)] + fn __abi() -> ::ethers::core::abi::Abi { + ::ethers::core::abi::ethabi::Contract { + constructor: ::core::option::Option::Some(::ethers::core::abi::ethabi::Constructor { + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("initialStakers"), + kind: ::ethers::core::abi::ethabi::ParamType::Array(::std::boxed::Box::new( + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ],), + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ],), + ::ethers::core::abi::ethabi::ParamType::Bool, + ],), + ),), + internal_type: ::core::option::Option::Some(::std::borrow::ToOwned::to_owned( + "struct PermissionedStakeTable.NodeInfo[]", + ),), + },], + }), + functions: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("_hashBlsKey"), + ::std::vec![::ethers::core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("_hashBlsKey"), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("blsVK"), + kind: ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ],), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("struct BN254.G2Point"), + ), + },], + outputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers::core::abi::ethabi::ParamType::FixedBytes(32usize,), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + },], + constant: ::core::option::Option::None, + state_mutability: ::ethers::core::abi::ethabi::StateMutability::Pure, + },], + ), + ( + ::std::borrow::ToOwned::to_owned("isStaker"), + ::std::vec![::ethers::core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("isStaker"), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("staker"), + kind: ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ],), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("struct BN254.G2Point"), + ), + },], + outputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers::core::abi::ethabi::ParamType::Bool, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bool"), + ), + },], + constant: ::core::option::Option::None, + state_mutability: 
::ethers::core::abi::ethabi::StateMutability::View, + },], + ), + ( + ::std::borrow::ToOwned::to_owned("owner"), + ::std::vec![::ethers::core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("owner"), + inputs: ::std::vec![], + outputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers::core::abi::ethabi::ParamType::Address, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("address"), + ), + },], + constant: ::core::option::Option::None, + state_mutability: ::ethers::core::abi::ethabi::StateMutability::View, + },], + ), + ( + ::std::borrow::ToOwned::to_owned("renounceOwnership"), + ::std::vec![::ethers::core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("renounceOwnership"), + inputs: ::std::vec![], + outputs: ::std::vec![], + constant: ::core::option::Option::None, + state_mutability: ::ethers::core::abi::ethabi::StateMutability::NonPayable, + },], + ), + ( + ::std::borrow::ToOwned::to_owned("transferOwnership"), + ::std::vec![::ethers::core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("transferOwnership"), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("newOwner"), + kind: ::ethers::core::abi::ethabi::ParamType::Address, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("address"), + ), + },], + outputs: ::std::vec![], + constant: ::core::option::Option::None, + state_mutability: ::ethers::core::abi::ethabi::StateMutability::NonPayable, + },], + ), + ( + ::std::borrow::ToOwned::to_owned("update"), + ::std::vec![::ethers::core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("update"), + inputs: ::std::vec![ + ::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("stakersToRemove"), + kind: ::ethers::core::abi::ethabi::ParamType::Array( + ::std::boxed::Box::new( + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Bool, + ],), + ), + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned( + "struct PermissionedStakeTable.NodeInfo[]", + ), + ), + }, + ::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("newStakers"), + kind: ::ethers::core::abi::ethabi::ParamType::Array( + ::std::boxed::Box::new( + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + 
::ethers::core::abi::ethabi::ParamType::Bool, + ],), + ), + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned( + "struct PermissionedStakeTable.NodeInfo[]", + ), + ), + }, + ], + outputs: ::std::vec![], + constant: ::core::option::Option::None, + state_mutability: ::ethers::core::abi::ethabi::StateMutability::NonPayable, + },], + ), + ]), + events: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("OwnershipTransferred"), + ::std::vec![::ethers::core::abi::ethabi::Event { + name: ::std::borrow::ToOwned::to_owned("OwnershipTransferred",), + inputs: ::std::vec![ + ::ethers::core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("previousOwner"), + kind: ::ethers::core::abi::ethabi::ParamType::Address, + indexed: true, + }, + ::ethers::core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("newOwner"), + kind: ::ethers::core::abi::ethabi::ParamType::Address, + indexed: true, + }, + ], + anonymous: false, + },], + ), + ( + ::std::borrow::ToOwned::to_owned("StakersUpdated"), + ::std::vec![::ethers::core::abi::ethabi::Event { + name: ::std::borrow::ToOwned::to_owned("StakersUpdated"), + inputs: ::std::vec![ + ::ethers::core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("removed"), + kind: ::ethers::core::abi::ethabi::ParamType::Array( + ::std::boxed::Box::new( + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Bool, + ],), + ), + ), + indexed: false, + }, + ::ethers::core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("added"), + kind: ::ethers::core::abi::ethabi::ParamType::Array( + ::std::boxed::Box::new( + ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ::ethers::core::abi::ethabi::ParamType::Uint( + 256usize + ), + ], + ), + ::ethers::core::abi::ethabi::ParamType::Bool, + ],), + ), + ), + indexed: false, + }, + ], + anonymous: false, + },], + ), + ]), + errors: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("OwnableInvalidOwner"), + ::std::vec![::ethers::core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("OwnableInvalidOwner",), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("owner"), + kind: ::ethers::core::abi::ethabi::ParamType::Address, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("address"), + ), + },], + },], + ), + ( + ::std::borrow::ToOwned::to_owned("OwnableUnauthorizedAccount"), + 
::std::vec![::ethers::core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("OwnableUnauthorizedAccount",), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("account"), + kind: ::ethers::core::abi::ethabi::ParamType::Address, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("address"), + ), + },], + },], + ), + ( + ::std::borrow::ToOwned::to_owned("StakerAlreadyExists"), + ::std::vec![::ethers::core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("StakerAlreadyExists",), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ],), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("struct BN254.G2Point"), + ), + },], + },], + ), + ( + ::std::borrow::ToOwned::to_owned("StakerNotFound"), + ::std::vec![::ethers::core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("StakerNotFound"), + inputs: ::std::vec![::ethers::core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers::core::abi::ethabi::ParamType::Tuple(::std::vec![ + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ::ethers::core::abi::ethabi::ParamType::Uint(256usize), + ],), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("struct BN254.G2Point"), + ), + },], + },], + ), + ]), + receive: false, + fallback: false, + } + } + ///The parsed JSON ABI of the contract. 
+ pub static PERMISSIONEDSTAKETABLE_ABI: ::ethers::contract::Lazy<::ethers::core::abi::Abi> = + ::ethers::contract::Lazy::new(__abi); + #[rustfmt::skip] + const __BYTECODE: &[u8] = b"`\x80`@R4\x80\x15b\0\0\x11W`\0\x80\xFD[P`@Qb\0\x0B\xF28\x03\x80b\0\x0B\xF2\x839\x81\x01`@\x81\x90Rb\0\x004\x91b\0\x03\x03V[3\x80b\0\0\\W`@Qc\x1EO\xBD\xF7`\xE0\x1B\x81R`\0`\x04\x82\x01R`$\x01[`@Q\x80\x91\x03\x90\xFD[b\0\0g\x81b\0\0zV[Pb\0\0s\x81b\0\0\xCAV[Pb\0\x04LV[`\0\x80T`\x01`\x01`\xA0\x1B\x03\x83\x81\x16`\x01`\x01`\xA0\x1B\x03\x19\x83\x16\x81\x17\x84U`@Q\x91\x90\x92\x16\x92\x83\x91\x7F\x8B\xE0\x07\x9CS\x16Y\x14\x13D\xCD\x1F\xD0\xA4\xF2\x84\x19I\x7F\x97\"\xA3\xDA\xAF\xE3\xB4\x18okdW\xE0\x91\x90\xA3PPV[`\0[\x81Q\x81\x10\x15b\0\x01\xA1W`\0b\0\x01\n\x83\x83\x81Q\x81\x10b\0\0\xF2Wb\0\0\xF2b\0\x046V[` \x02` \x01\x01Q`\0\x01Qb\0\x01\xA5` \x1B` \x1CV[`\0\x81\x81R`\x01` R`@\x90 T\x90\x91P`\xFF\x16\x15b\0\x01\x7FW\x82\x82\x81Q\x81\x10b\0\x01:Wb\0\x01:b\0\x046V[` \x90\x81\x02\x91\x90\x91\x01\x81\x01QQ`@\x80Qc\x1B\x06\xE1A`\xE1\x1B\x81R\x82Q`\x04\x82\x01R\x92\x82\x01Q`$\x84\x01R\x81\x01Q`D\x83\x01R``\x01Q`d\x82\x01R`\x84\x01b\0\0SV[`\0\x90\x81R`\x01` \x81\x90R`@\x90\x91 \x80T`\xFF\x19\x16\x82\x17\x90U\x01b\0\0\xCDV[PPV[`\0\x81`\0\x01Q\x82` \x01Q\x83`@\x01Q\x84``\x01Q`@Q` \x01b\0\x01\xE5\x94\x93\x92\x91\x90\x93\x84R` \x84\x01\x92\x90\x92R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q` \x81\x83\x03\x03\x81R\x90`@R\x80Q\x90` \x01 \x90P\x91\x90PV[cNH{q`\xE0\x1B`\0R`A`\x04R`$`\0\xFD[`@Q``\x81\x01`\x01`\x01`@\x1B\x03\x81\x11\x82\x82\x10\x17\x15b\0\x02=Wb\0\x02=b\0\x02\x02V[`@R\x90V[`@Q`\x80\x81\x01`\x01`\x01`@\x1B\x03\x81\x11\x82\x82\x10\x17\x15b\0\x02=Wb\0\x02=b\0\x02\x02V[`@Q`\x1F\x82\x01`\x1F\x19\x16\x81\x01`\x01`\x01`@\x1B\x03\x81\x11\x82\x82\x10\x17\x15b\0\x02\x93Wb\0\x02\x93b\0\x02\x02V[`@R\x91\x90PV[`\0`@\x82\x84\x03\x12\x15b\0\x02\xAEW`\0\x80\xFD[`@\x80Q\x90\x81\x01`\x01`\x01`@\x1B\x03\x81\x11\x82\x82\x10\x17\x15b\0\x02\xD3Wb\0\x02\xD3b\0\x02\x02V[`@R\x82Q\x81R` \x92\x83\x01Q\x92\x81\x01\x92\x90\x92RP\x91\x90PV[\x80Q\x80\x15\x15\x81\x14b\0\x02\xFEW`\0\x80\xFD[\x91\x90PV[`\0` \x80\x83\x85\x03\x12\x15b\0\x03\x17W`\0\x80\xFD[\x82Q`\x01`\x01`@\x1B\x03\x80\x82\x11\x15b\0\x03/W`\0\x80\xFD[\x81\x85\x01\x91P\x85`\x1F\x83\x01\x12b\0\x03DW`\0\x80\xFD[\x81Q\x81\x81\x11\x15b\0\x03YWb\0\x03Yb\0\x02\x02V[b\0\x03i\x84\x82`\x05\x1B\x01b\0\x02hV[\x81\x81R\x84\x81\x01\x92P`\xE0\x91\x82\x02\x84\x01\x85\x01\x91\x88\x83\x11\x15b\0\x03\x89W`\0\x80\xFD[\x93\x85\x01\x93[\x82\x85\x10\x15b\0\x04*W\x84\x89\x03\x81\x81\x12\x15b\0\x03\xA9W`\0\x80\x81\xFD[b\0\x03\xB3b\0\x02\x18V[`\x80\x80\x83\x12\x15b\0\x03\xC4W`\0\x80\x81\xFD[b\0\x03\xCEb\0\x02CV[\x92P\x87Q\x83R\x88\x88\x01Q\x89\x84\x01R`@\x80\x89\x01Q\x81\x85\x01R``\x80\x8A\x01Q\x81\x86\x01RP\x83\x83Rb\0\x04\x02\x8D\x83\x8B\x01b\0\x02\x9BV[\x8A\x84\x01Rb\0\x04\x14`\xC0\x8A\x01b\0\x02\xEDV[\x90\x83\x01RP\x85RP\x93\x84\x01\x93\x92\x85\x01\x92b\0\x03\x8EV[P\x97\x96PPPPPPPV[cNH{q`\xE0\x1B`\0R`2`\x04R`$`\0\xFD[a\x07\x96\x80b\0\x04\\`\09`\0\xF3\xFE`\x80`@R4\x80\x15a\0\x10W`\0\x80\xFD[P`\x046\x10a\0bW`\x005`\xE0\x1C\x80cqP\x18\xA6\x14a\0gW\x80cu\xD7\x05\xE9\x14a\0qW\x80c\x8D\xA5\xCB[\x14a\0\x99W\x80c\x9B0\xA5\xE6\x14a\0\xB4W\x80c\xA8\xA0\xEA\\\x14a\0\xD5W\x80c\xF2\xFD\xE3\x8B\x14a\0\xE8W[`\0\x80\xFD[a\0oa\0\xFBV[\0[a\0\x84a\0\x7F6`\x04a\x05\x1EV[a\x01\x0FV[`@Q\x90\x15\x15\x81R` \x01[`@Q\x80\x91\x03\x90\xF3[`\0T`@Q`\x01`\x01`\xA0\x1B\x03\x90\x91\x16\x81R` \x01a\0\x90V[a\0\xC7a\0\xC26`\x04a\x05\x1EV[a\x018V[`@Q\x90\x81R` 
\x01a\0\x90V[a\0oa\0\xE36`\x04a\x062V[a\x01\x94V[a\0oa\0\xF66`\x04a\x06\x96V[a\x01\xEBV[a\x01\x03a\x02.V[a\x01\r`\0a\x02[V[V[`\0`\x01`\0a\x01\x1E\x84a\x018V[\x81R` \x81\x01\x91\x90\x91R`@\x01`\0 T`\xFF\x16\x92\x91PPV[`\0\x81`\0\x01Q\x82` \x01Q\x83`@\x01Q\x84``\x01Q`@Q` \x01a\x01w\x94\x93\x92\x91\x90\x93\x84R` \x84\x01\x92\x90\x92R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q` \x81\x83\x03\x03\x81R\x90`@R\x80Q\x90` \x01 \x90P\x91\x90PV[a\x01\x9Ca\x02.V[a\x01\xA5\x82a\x02\xABV[a\x01\xAE\x81a\x03qV[\x7F5\r\xA2J2\x14l\xFD\x14\xB4\xFD\x11\xFB>;t\xA3\xB9\xA0\xCC\x92\x01KEL)S\xA0\x0E\xAB\xA3=\x82\x82`@Qa\x01\xDF\x92\x91\x90a\x07EV[`@Q\x80\x91\x03\x90\xA1PPV[a\x01\xF3a\x02.V[`\x01`\x01`\xA0\x1B\x03\x81\x16a\x02\"W`@Qc\x1EO\xBD\xF7`\xE0\x1B\x81R`\0`\x04\x82\x01R`$\x01[`@Q\x80\x91\x03\x90\xFD[a\x02+\x81a\x02[V[PV[`\0T`\x01`\x01`\xA0\x1B\x03\x163\x14a\x01\rW`@Qc\x11\x8C\xDA\xA7`\xE0\x1B\x81R3`\x04\x82\x01R`$\x01a\x02\x19V[`\0\x80T`\x01`\x01`\xA0\x1B\x03\x83\x81\x16`\x01`\x01`\xA0\x1B\x03\x19\x83\x16\x81\x17\x84U`@Q\x91\x90\x92\x16\x92\x83\x91\x7F\x8B\xE0\x07\x9CS\x16Y\x14\x13D\xCD\x1F\xD0\xA4\xF2\x84\x19I\x7F\x97\"\xA3\xDA\xAF\xE3\xB4\x18okdW\xE0\x91\x90\xA3PPV[`\0[\x81Q\x81\x10\x15a\x03mW`\0a\x02\xDF\x83\x83\x81Q\x81\x10a\x02\xCEWa\x02\xCEa\x07sV[` \x02` \x01\x01Q`\0\x01Qa\x018V[`\0\x81\x81R`\x01` R`@\x90 T\x90\x91P`\xFF\x16a\x03NW\x82\x82\x81Q\x81\x10a\x03\nWa\x03\na\x07sV[` \x90\x81\x02\x91\x90\x91\x01\x81\x01QQ`@\x80Qc4\xA7V\x1F`\xE0\x1B\x81R\x82Q`\x04\x82\x01R\x92\x82\x01Q`$\x84\x01R\x81\x01Q`D\x83\x01R``\x01Q`d\x82\x01R`\x84\x01a\x02\x19V[`\0\x90\x81R`\x01` \x81\x90R`@\x90\x91 \x80T`\xFF\x19\x16\x90U\x01a\x02\xAEV[PPV[`\0[\x81Q\x81\x10\x15a\x03mW`\0a\x03\x94\x83\x83\x81Q\x81\x10a\x02\xCEWa\x02\xCEa\x07sV[`\0\x81\x81R`\x01` R`@\x90 T\x90\x91P`\xFF\x16\x15a\x04\x04W\x82\x82\x81Q\x81\x10a\x03\xC0Wa\x03\xC0a\x07sV[` \x90\x81\x02\x91\x90\x91\x01\x81\x01QQ`@\x80Qc\x1B\x06\xE1A`\xE1\x1B\x81R\x82Q`\x04\x82\x01R\x92\x82\x01Q`$\x84\x01R\x81\x01Q`D\x83\x01R``\x01Q`d\x82\x01R`\x84\x01a\x02\x19V[`\0\x90\x81R`\x01` \x81\x90R`@\x90\x91 \x80T`\xFF\x19\x16\x82\x17\x90U\x01a\x03tV[cNH{q`\xE0\x1B`\0R`A`\x04R`$`\0\xFD[`@Q``\x81\x01g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x11\x82\x82\x10\x17\x15a\x04^Wa\x04^a\x04%V[`@R\x90V[`@\x80Q\x90\x81\x01g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x11\x82\x82\x10\x17\x15a\x04^Wa\x04^a\x04%V[`@Q`\x1F\x82\x01`\x1F\x19\x16\x81\x01g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x11\x82\x82\x10\x17\x15a\x04\xB0Wa\x04\xB0a\x04%V[`@R\x91\x90PV[`\0`\x80\x82\x84\x03\x12\x15a\x04\xCAW`\0\x80\xFD[`@Q`\x80\x81\x01\x81\x81\x10g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x82\x11\x17\x15a\x04\xEDWa\x04\xEDa\x04%V[\x80`@RP\x80\x91P\x825\x81R` \x83\x015` \x82\x01R`@\x83\x015`@\x82\x01R``\x83\x015``\x82\x01RP\x92\x91PPV[`\0`\x80\x82\x84\x03\x12\x15a\x050W`\0\x80\xFD[a\x05:\x83\x83a\x04\xB8V[\x93\x92PPPV[`\0\x82`\x1F\x83\x01\x12a\x05RW`\0\x80\xFD[\x815` 
g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x82\x11\x15a\x05nWa\x05na\x04%V[a\x05|\x81\x83`\x05\x1B\x01a\x04\x87V[\x82\x81R`\xE0\x92\x83\x02\x85\x01\x82\x01\x92\x82\x82\x01\x91\x90\x87\x85\x11\x15a\x05\x9BW`\0\x80\xFD[\x83\x87\x01[\x85\x81\x10\x15a\x06%W\x80\x89\x03\x82\x81\x12\x15a\x05\xB8W`\0\x80\x81\xFD[a\x05\xC0a\x04;V[a\x05\xCA\x8B\x84a\x04\xB8V[\x81R`@\x80`\x7F\x19\x84\x01\x12\x15a\x05\xE0W`\0\x80\x81\xFD[a\x05\xE8a\x04dV[`\x80\x85\x015\x81R`\xA0\x85\x015\x89\x82\x01R\x82\x89\x01R`\xC0\x84\x015\x92P\x82\x15\x15\x83\x14a\x06\x12W`\0\x80\x81\xFD[\x81\x01\x91\x90\x91R\x84R\x92\x84\x01\x92\x81\x01a\x05\x9FV[P\x90\x97\x96PPPPPPPV[`\0\x80`@\x83\x85\x03\x12\x15a\x06EW`\0\x80\xFD[\x825g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x80\x82\x11\x15a\x06]W`\0\x80\xFD[a\x06i\x86\x83\x87\x01a\x05AV[\x93P` \x85\x015\x91P\x80\x82\x11\x15a\x06\x7FW`\0\x80\xFD[Pa\x06\x8C\x85\x82\x86\x01a\x05AV[\x91PP\x92P\x92\x90PV[`\0` \x82\x84\x03\x12\x15a\x06\xA8W`\0\x80\xFD[\x815`\x01`\x01`\xA0\x1B\x03\x81\x16\x81\x14a\x05:W`\0\x80\xFD[`\0\x81Q\x80\x84R` \x80\x85\x01\x94P` \x84\x01`\0[\x83\x81\x10\x15a\x07:W\x81Qa\x07\n\x88\x82Q\x80Q\x82R` \x81\x01Q` \x83\x01R`@\x81\x01Q`@\x83\x01R``\x81\x01Q``\x83\x01RPPV[\x80\x84\x01Q\x80Q`\x80\x8A\x01R\x84\x01Q`\xA0\x89\x01R`@\x01Q\x15\x15`\xC0\x88\x01R`\xE0\x90\x96\x01\x95\x90\x82\x01\x90`\x01\x01a\x06\xD4V[P\x94\x95\x94PPPPPV[`@\x81R`\0a\x07X`@\x83\x01\x85a\x06\xBFV[\x82\x81\x03` \x84\x01Ra\x07j\x81\x85a\x06\xBFV[\x95\x94PPPPPV[cNH{q`\xE0\x1B`\0R`2`\x04R`$`\0\xFD\xFE\xA1dsolcC\0\x08\x17\0\n"; + /// The bytecode of the contract. + pub static PERMISSIONEDSTAKETABLE_BYTECODE: ::ethers::core::types::Bytes = + ::ethers::core::types::Bytes::from_static(__BYTECODE); + #[rustfmt::skip] + const __DEPLOYED_BYTECODE: &[u8] = b"`\x80`@R4\x80\x15a\0\x10W`\0\x80\xFD[P`\x046\x10a\0bW`\x005`\xE0\x1C\x80cqP\x18\xA6\x14a\0gW\x80cu\xD7\x05\xE9\x14a\0qW\x80c\x8D\xA5\xCB[\x14a\0\x99W\x80c\x9B0\xA5\xE6\x14a\0\xB4W\x80c\xA8\xA0\xEA\\\x14a\0\xD5W\x80c\xF2\xFD\xE3\x8B\x14a\0\xE8W[`\0\x80\xFD[a\0oa\0\xFBV[\0[a\0\x84a\0\x7F6`\x04a\x05\x1EV[a\x01\x0FV[`@Q\x90\x15\x15\x81R` \x01[`@Q\x80\x91\x03\x90\xF3[`\0T`@Q`\x01`\x01`\xA0\x1B\x03\x90\x91\x16\x81R` \x01a\0\x90V[a\0\xC7a\0\xC26`\x04a\x05\x1EV[a\x018V[`@Q\x90\x81R` \x01a\0\x90V[a\0oa\0\xE36`\x04a\x062V[a\x01\x94V[a\0oa\0\xF66`\x04a\x06\x96V[a\x01\xEBV[a\x01\x03a\x02.V[a\x01\r`\0a\x02[V[V[`\0`\x01`\0a\x01\x1E\x84a\x018V[\x81R` \x81\x01\x91\x90\x91R`@\x01`\0 T`\xFF\x16\x92\x91PPV[`\0\x81`\0\x01Q\x82` \x01Q\x83`@\x01Q\x84``\x01Q`@Q` \x01a\x01w\x94\x93\x92\x91\x90\x93\x84R` \x84\x01\x92\x90\x92R`@\x83\x01R``\x82\x01R`\x80\x01\x90V[`@Q` \x81\x83\x03\x03\x81R\x90`@R\x80Q\x90` \x01 \x90P\x91\x90PV[a\x01\x9Ca\x02.V[a\x01\xA5\x82a\x02\xABV[a\x01\xAE\x81a\x03qV[\x7F5\r\xA2J2\x14l\xFD\x14\xB4\xFD\x11\xFB>;t\xA3\xB9\xA0\xCC\x92\x01KEL)S\xA0\x0E\xAB\xA3=\x82\x82`@Qa\x01\xDF\x92\x91\x90a\x07EV[`@Q\x80\x91\x03\x90\xA1PPV[a\x01\xF3a\x02.V[`\x01`\x01`\xA0\x1B\x03\x81\x16a\x02\"W`@Qc\x1EO\xBD\xF7`\xE0\x1B\x81R`\0`\x04\x82\x01R`$\x01[`@Q\x80\x91\x03\x90\xFD[a\x02+\x81a\x02[V[PV[`\0T`\x01`\x01`\xA0\x1B\x03\x163\x14a\x01\rW`@Qc\x11\x8C\xDA\xA7`\xE0\x1B\x81R3`\x04\x82\x01R`$\x01a\x02\x19V[`\0\x80T`\x01`\x01`\xA0\x1B\x03\x83\x81\x16`\x01`\x01`\xA0\x1B\x03\x19\x83\x16\x81\x17\x84U`@Q\x91\x90\x92\x16\x92\x83\x91\x7F\x8B\xE0\x07\x9CS\x16Y\x14\x13D\xCD\x1F\xD0\xA4\xF2\x84\x19I\x7F\x97\"\xA3\xDA\xAF\xE3\xB4\x18okdW\xE0\x91\x90\xA3PPV[`\0[\x81Q\x81\x10\x15a\x03mW`\0a\x02\xDF\x83\x83\x81Q\x81\x10a\x02\xCEWa\x02\xCEa\x07sV[` \x02` \x01\x01Q`\0\x01Qa\x018V[`\0\x81\x81R`\x01` R`@\x90 
T\x90\x91P`\xFF\x16a\x03NW\x82\x82\x81Q\x81\x10a\x03\nWa\x03\na\x07sV[` \x90\x81\x02\x91\x90\x91\x01\x81\x01QQ`@\x80Qc4\xA7V\x1F`\xE0\x1B\x81R\x82Q`\x04\x82\x01R\x92\x82\x01Q`$\x84\x01R\x81\x01Q`D\x83\x01R``\x01Q`d\x82\x01R`\x84\x01a\x02\x19V[`\0\x90\x81R`\x01` \x81\x90R`@\x90\x91 \x80T`\xFF\x19\x16\x90U\x01a\x02\xAEV[PPV[`\0[\x81Q\x81\x10\x15a\x03mW`\0a\x03\x94\x83\x83\x81Q\x81\x10a\x02\xCEWa\x02\xCEa\x07sV[`\0\x81\x81R`\x01` R`@\x90 T\x90\x91P`\xFF\x16\x15a\x04\x04W\x82\x82\x81Q\x81\x10a\x03\xC0Wa\x03\xC0a\x07sV[` \x90\x81\x02\x91\x90\x91\x01\x81\x01QQ`@\x80Qc\x1B\x06\xE1A`\xE1\x1B\x81R\x82Q`\x04\x82\x01R\x92\x82\x01Q`$\x84\x01R\x81\x01Q`D\x83\x01R``\x01Q`d\x82\x01R`\x84\x01a\x02\x19V[`\0\x90\x81R`\x01` \x81\x90R`@\x90\x91 \x80T`\xFF\x19\x16\x82\x17\x90U\x01a\x03tV[cNH{q`\xE0\x1B`\0R`A`\x04R`$`\0\xFD[`@Q``\x81\x01g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x11\x82\x82\x10\x17\x15a\x04^Wa\x04^a\x04%V[`@R\x90V[`@\x80Q\x90\x81\x01g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x11\x82\x82\x10\x17\x15a\x04^Wa\x04^a\x04%V[`@Q`\x1F\x82\x01`\x1F\x19\x16\x81\x01g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x81\x11\x82\x82\x10\x17\x15a\x04\xB0Wa\x04\xB0a\x04%V[`@R\x91\x90PV[`\0`\x80\x82\x84\x03\x12\x15a\x04\xCAW`\0\x80\xFD[`@Q`\x80\x81\x01\x81\x81\x10g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x82\x11\x17\x15a\x04\xEDWa\x04\xEDa\x04%V[\x80`@RP\x80\x91P\x825\x81R` \x83\x015` \x82\x01R`@\x83\x015`@\x82\x01R``\x83\x015``\x82\x01RP\x92\x91PPV[`\0`\x80\x82\x84\x03\x12\x15a\x050W`\0\x80\xFD[a\x05:\x83\x83a\x04\xB8V[\x93\x92PPPV[`\0\x82`\x1F\x83\x01\x12a\x05RW`\0\x80\xFD[\x815` g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x82\x11\x15a\x05nWa\x05na\x04%V[a\x05|\x81\x83`\x05\x1B\x01a\x04\x87V[\x82\x81R`\xE0\x92\x83\x02\x85\x01\x82\x01\x92\x82\x82\x01\x91\x90\x87\x85\x11\x15a\x05\x9BW`\0\x80\xFD[\x83\x87\x01[\x85\x81\x10\x15a\x06%W\x80\x89\x03\x82\x81\x12\x15a\x05\xB8W`\0\x80\x81\xFD[a\x05\xC0a\x04;V[a\x05\xCA\x8B\x84a\x04\xB8V[\x81R`@\x80`\x7F\x19\x84\x01\x12\x15a\x05\xE0W`\0\x80\x81\xFD[a\x05\xE8a\x04dV[`\x80\x85\x015\x81R`\xA0\x85\x015\x89\x82\x01R\x82\x89\x01R`\xC0\x84\x015\x92P\x82\x15\x15\x83\x14a\x06\x12W`\0\x80\x81\xFD[\x81\x01\x91\x90\x91R\x84R\x92\x84\x01\x92\x81\x01a\x05\x9FV[P\x90\x97\x96PPPPPPPV[`\0\x80`@\x83\x85\x03\x12\x15a\x06EW`\0\x80\xFD[\x825g\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x80\x82\x11\x15a\x06]W`\0\x80\xFD[a\x06i\x86\x83\x87\x01a\x05AV[\x93P` \x85\x015\x91P\x80\x82\x11\x15a\x06\x7FW`\0\x80\xFD[Pa\x06\x8C\x85\x82\x86\x01a\x05AV[\x91PP\x92P\x92\x90PV[`\0` \x82\x84\x03\x12\x15a\x06\xA8W`\0\x80\xFD[\x815`\x01`\x01`\xA0\x1B\x03\x81\x16\x81\x14a\x05:W`\0\x80\xFD[`\0\x81Q\x80\x84R` \x80\x85\x01\x94P` \x84\x01`\0[\x83\x81\x10\x15a\x07:W\x81Qa\x07\n\x88\x82Q\x80Q\x82R` \x81\x01Q` \x83\x01R`@\x81\x01Q`@\x83\x01R``\x81\x01Q``\x83\x01RPPV[\x80\x84\x01Q\x80Q`\x80\x8A\x01R\x84\x01Q`\xA0\x89\x01R`@\x01Q\x15\x15`\xC0\x88\x01R`\xE0\x90\x96\x01\x95\x90\x82\x01\x90`\x01\x01a\x06\xD4V[P\x94\x95\x94PPPPPV[`@\x81R`\0a\x07X`@\x83\x01\x85a\x06\xBFV[\x82\x81\x03` \x84\x01Ra\x07j\x81\x85a\x06\xBFV[\x95\x94PPPPPV[cNH{q`\xE0\x1B`\0R`2`\x04R`$`\0\xFD\xFE\xA1dsolcC\0\x08\x17\0\n"; + /// The deployed bytecode of the contract. 
+ pub static PERMISSIONEDSTAKETABLE_DEPLOYED_BYTECODE: ::ethers::core::types::Bytes = + ::ethers::core::types::Bytes::from_static(__DEPLOYED_BYTECODE); + pub struct PermissionedStakeTable(::ethers::contract::Contract); + impl ::core::clone::Clone for PermissionedStakeTable { + fn clone(&self) -> Self { + Self(::core::clone::Clone::clone(&self.0)) + } + } + impl ::core::ops::Deref for PermissionedStakeTable { + type Target = ::ethers::contract::Contract; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl ::core::ops::DerefMut for PermissionedStakeTable { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + impl ::core::fmt::Debug for PermissionedStakeTable { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_tuple(::core::stringify!(PermissionedStakeTable)) + .field(&self.address()) + .finish() + } + } + impl PermissionedStakeTable { + /// Creates a new contract instance with the specified `ethers` client at + /// `address`. The contract derefs to a `ethers::Contract` object. + pub fn new>( + address: T, + client: ::std::sync::Arc, + ) -> Self { + Self(::ethers::contract::Contract::new( + address.into(), + PERMISSIONEDSTAKETABLE_ABI.clone(), + client, + )) + } + /// Constructs the general purpose `Deployer` instance based on the provided constructor arguments and sends it. + /// Returns a new instance of a deployer that returns an instance of this contract after sending the transaction + /// + /// Notes: + /// - If there are no constructor arguments, you should pass `()` as the argument. + /// - The default poll duration is 7 seconds. + /// - The default number of confirmations is 1 block. + /// + /// + /// # Example + /// + /// Generate contract bindings with `abigen!` and deploy a new contract instance. + /// + /// *Note*: this requires a `bytecode` and `abi` object in the `greeter.json` artifact. 
+ /// + /// ```ignore + /// # async fn deploy(client: ::std::sync::Arc) { + /// abigen!(Greeter, "../greeter.json"); + /// + /// let greeter_contract = Greeter::deploy(client, "Hello world!".to_string()).unwrap().send().await.unwrap(); + /// let msg = greeter_contract.greet().call().await.unwrap(); + /// # } + /// ``` + pub fn deploy( + client: ::std::sync::Arc, + constructor_args: T, + ) -> ::core::result::Result< + ::ethers::contract::builders::ContractDeployer, + ::ethers::contract::ContractError, + > { + let factory = ::ethers::contract::ContractFactory::new( + PERMISSIONEDSTAKETABLE_ABI.clone(), + PERMISSIONEDSTAKETABLE_BYTECODE.clone().into(), + client, + ); + let deployer = factory.deploy(constructor_args)?; + let deployer = ::ethers::contract::ContractDeployer::new(deployer); + Ok(deployer) + } + ///Calls the contract's `_hashBlsKey` (0x9b30a5e6) function + pub fn hash_bls_key( + &self, + bls_vk: G2Point, + ) -> ::ethers::contract::builders::ContractCall { + self.0 + .method_hash([155, 48, 165, 230], (bls_vk,)) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `isStaker` (0x75d705e9) function + pub fn is_staker( + &self, + staker: G2Point, + ) -> ::ethers::contract::builders::ContractCall { + self.0 + .method_hash([117, 215, 5, 233], (staker,)) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `owner` (0x8da5cb5b) function + pub fn owner( + &self, + ) -> ::ethers::contract::builders::ContractCall { + self.0 + .method_hash([141, 165, 203, 91], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `renounceOwnership` (0x715018a6) function + pub fn renounce_ownership(&self) -> ::ethers::contract::builders::ContractCall { + self.0 + .method_hash([113, 80, 24, 166], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `transferOwnership` (0xf2fde38b) function + pub fn transfer_ownership( + &self, + new_owner: ::ethers::core::types::Address, + ) -> ::ethers::contract::builders::ContractCall { + self.0 + .method_hash([242, 253, 227, 139], new_owner) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `update` (0xa8a0ea5c) function + pub fn update( + &self, + stakers_to_remove: ::std::vec::Vec, + new_stakers: ::std::vec::Vec, + ) -> ::ethers::contract::builders::ContractCall { + self.0 + .method_hash([168, 160, 234, 92], (stakers_to_remove, new_stakers)) + .expect("method not found (this should never happen)") + } + ///Gets the contract's `OwnershipTransferred` event + pub fn ownership_transferred_filter( + &self, + ) -> ::ethers::contract::builders::Event<::std::sync::Arc, M, OwnershipTransferredFilter> + { + self.0.event() + } + ///Gets the contract's `StakersUpdated` event + pub fn stakers_updated_filter( + &self, + ) -> ::ethers::contract::builders::Event<::std::sync::Arc, M, StakersUpdatedFilter> + { + self.0.event() + } + /// Returns an `Event` builder for all the events of this contract. 
+ pub fn events( + &self, + ) -> ::ethers::contract::builders::Event<::std::sync::Arc, M, PermissionedStakeTableEvents> + { + self.0 + .event_with_filter(::core::default::Default::default()) + } + } + impl From<::ethers::contract::Contract> + for PermissionedStakeTable + { + fn from(contract: ::ethers::contract::Contract) -> Self { + Self::new(contract.address(), contract.client()) + } + } + ///Custom Error type `OwnableInvalidOwner` with signature `OwnableInvalidOwner(address)` and selector `0x1e4fbdf7` + #[derive( + Clone, + ::ethers::contract::EthError, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[etherror(name = "OwnableInvalidOwner", abi = "OwnableInvalidOwner(address)")] + pub struct OwnableInvalidOwner { + pub owner: ::ethers::core::types::Address, + } + ///Custom Error type `OwnableUnauthorizedAccount` with signature `OwnableUnauthorizedAccount(address)` and selector `0x118cdaa7` + #[derive( + Clone, + ::ethers::contract::EthError, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[etherror( + name = "OwnableUnauthorizedAccount", + abi = "OwnableUnauthorizedAccount(address)" + )] + pub struct OwnableUnauthorizedAccount { + pub account: ::ethers::core::types::Address, + } + ///Custom Error type `StakerAlreadyExists` with signature `StakerAlreadyExists((uint256,uint256,uint256,uint256))` and selector `0x360dc282` + #[derive( + Clone, + ::ethers::contract::EthError, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[etherror( + name = "StakerAlreadyExists", + abi = "StakerAlreadyExists((uint256,uint256,uint256,uint256))" + )] + pub struct StakerAlreadyExists(pub G2Point); + ///Custom Error type `StakerNotFound` with signature `StakerNotFound((uint256,uint256,uint256,uint256))` and selector `0x34a7561f` + #[derive( + Clone, + ::ethers::contract::EthError, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[etherror( + name = "StakerNotFound", + abi = "StakerNotFound((uint256,uint256,uint256,uint256))" + )] + pub struct StakerNotFound(pub G2Point); + ///Container type for all of the contract's custom errors + #[derive( + Clone, + ::ethers::contract::EthAbiType, + serde::Serialize, + serde::Deserialize, + Debug, + PartialEq, + Eq, + Hash, + )] + pub enum PermissionedStakeTableErrors { + OwnableInvalidOwner(OwnableInvalidOwner), + OwnableUnauthorizedAccount(OwnableUnauthorizedAccount), + StakerAlreadyExists(StakerAlreadyExists), + StakerNotFound(StakerNotFound), + /// The standard solidity revert string, with selector + /// Error(string) -- 0x08c379a0 + RevertString(::std::string::String), + } + impl ::ethers::core::abi::AbiDecode for PermissionedStakeTableErrors { + fn decode( + data: impl AsRef<[u8]>, + ) -> ::core::result::Result { + let data = data.as_ref(); + if let Ok(decoded) = + <::std::string::String as ::ethers::core::abi::AbiDecode>::decode(data) + { + return Ok(Self::RevertString(decoded)); + } + if let Ok(decoded) = + ::decode(data) + { + return Ok(Self::OwnableInvalidOwner(decoded)); + } + if let Ok(decoded) = + ::decode(data) + { + return Ok(Self::OwnableUnauthorizedAccount(decoded)); + } + if let Ok(decoded) = + ::decode(data) + { + return Ok(Self::StakerAlreadyExists(decoded)); + } + if let Ok(decoded) = ::decode(data) { + return 
Ok(Self::StakerNotFound(decoded)); + } + Err(::ethers::core::abi::Error::InvalidData.into()) + } + } + impl ::ethers::core::abi::AbiEncode for PermissionedStakeTableErrors { + fn encode(self) -> ::std::vec::Vec { + match self { + Self::OwnableInvalidOwner(element) => { + ::ethers::core::abi::AbiEncode::encode(element) + } + Self::OwnableUnauthorizedAccount(element) => { + ::ethers::core::abi::AbiEncode::encode(element) + } + Self::StakerAlreadyExists(element) => { + ::ethers::core::abi::AbiEncode::encode(element) + } + Self::StakerNotFound(element) => ::ethers::core::abi::AbiEncode::encode(element), + Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), + } + } + } + impl ::ethers::contract::ContractRevert for PermissionedStakeTableErrors { + fn valid_selector(selector: [u8; 4]) -> bool { + match selector { + [0x08, 0xc3, 0x79, 0xa0] => true, + _ if selector + == ::selector() => + { + true + } + _ if selector + == ::selector() => + { + true + } + _ if selector + == ::selector() => + { + true + } + _ if selector == ::selector() => { + true + } + _ => false, + } + } + } + impl ::core::fmt::Display for PermissionedStakeTableErrors { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::OwnableInvalidOwner(element) => ::core::fmt::Display::fmt(element, f), + Self::OwnableUnauthorizedAccount(element) => ::core::fmt::Display::fmt(element, f), + Self::StakerAlreadyExists(element) => ::core::fmt::Display::fmt(element, f), + Self::StakerNotFound(element) => ::core::fmt::Display::fmt(element, f), + Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), + } + } + } + impl ::core::convert::From<::std::string::String> for PermissionedStakeTableErrors { + fn from(value: String) -> Self { + Self::RevertString(value) + } + } + impl ::core::convert::From for PermissionedStakeTableErrors { + fn from(value: OwnableInvalidOwner) -> Self { + Self::OwnableInvalidOwner(value) + } + } + impl ::core::convert::From for PermissionedStakeTableErrors { + fn from(value: OwnableUnauthorizedAccount) -> Self { + Self::OwnableUnauthorizedAccount(value) + } + } + impl ::core::convert::From for PermissionedStakeTableErrors { + fn from(value: StakerAlreadyExists) -> Self { + Self::StakerAlreadyExists(value) + } + } + impl ::core::convert::From for PermissionedStakeTableErrors { + fn from(value: StakerNotFound) -> Self { + Self::StakerNotFound(value) + } + } + #[derive( + Clone, + ::ethers::contract::EthEvent, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethevent( + name = "OwnershipTransferred", + abi = "OwnershipTransferred(address,address)" + )] + pub struct OwnershipTransferredFilter { + #[ethevent(indexed)] + pub previous_owner: ::ethers::core::types::Address, + #[ethevent(indexed)] + pub new_owner: ::ethers::core::types::Address, + } + #[derive( + Clone, + ::ethers::contract::EthEvent, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethevent( + name = "StakersUpdated", + abi = "StakersUpdated(((uint256,uint256,uint256,uint256),(uint256,uint256),bool)[],((uint256,uint256,uint256,uint256),(uint256,uint256),bool)[])" + )] + pub struct StakersUpdatedFilter { + pub removed: ::std::vec::Vec, + pub added: ::std::vec::Vec, + } + ///Container type for all of the contract's events + #[derive( + Clone, + ::ethers::contract::EthAbiType, + serde::Serialize, + serde::Deserialize, + Debug, + PartialEq, + 
Eq, + Hash, + )] + pub enum PermissionedStakeTableEvents { + OwnershipTransferredFilter(OwnershipTransferredFilter), + StakersUpdatedFilter(StakersUpdatedFilter), + } + impl ::ethers::contract::EthLogDecode for PermissionedStakeTableEvents { + fn decode_log( + log: &::ethers::core::abi::RawLog, + ) -> ::core::result::Result { + if let Ok(decoded) = OwnershipTransferredFilter::decode_log(log) { + return Ok(PermissionedStakeTableEvents::OwnershipTransferredFilter( + decoded, + )); + } + if let Ok(decoded) = StakersUpdatedFilter::decode_log(log) { + return Ok(PermissionedStakeTableEvents::StakersUpdatedFilter(decoded)); + } + Err(::ethers::core::abi::Error::InvalidData) + } + } + impl ::core::fmt::Display for PermissionedStakeTableEvents { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), + Self::StakersUpdatedFilter(element) => ::core::fmt::Display::fmt(element, f), + } + } + } + impl ::core::convert::From for PermissionedStakeTableEvents { + fn from(value: OwnershipTransferredFilter) -> Self { + Self::OwnershipTransferredFilter(value) + } + } + impl ::core::convert::From for PermissionedStakeTableEvents { + fn from(value: StakersUpdatedFilter) -> Self { + Self::StakersUpdatedFilter(value) + } + } + ///Container type for all input parameters for the `_hashBlsKey` function with signature `_hashBlsKey((uint256,uint256,uint256,uint256))` and selector `0x9b30a5e6` + #[derive( + Clone, + ::ethers::contract::EthCall, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethcall( + name = "_hashBlsKey", + abi = "_hashBlsKey((uint256,uint256,uint256,uint256))" + )] + pub struct HashBlsKeyCall { + pub bls_vk: G2Point, + } + ///Container type for all input parameters for the `isStaker` function with signature `isStaker((uint256,uint256,uint256,uint256))` and selector `0x75d705e9` + #[derive( + Clone, + ::ethers::contract::EthCall, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethcall(name = "isStaker", abi = "isStaker((uint256,uint256,uint256,uint256))")] + pub struct IsStakerCall { + pub staker: G2Point, + } + ///Container type for all input parameters for the `owner` function with signature `owner()` and selector `0x8da5cb5b` + #[derive( + Clone, + ::ethers::contract::EthCall, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethcall(name = "owner", abi = "owner()")] + pub struct OwnerCall; + ///Container type for all input parameters for the `renounceOwnership` function with signature `renounceOwnership()` and selector `0x715018a6` + #[derive( + Clone, + ::ethers::contract::EthCall, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethcall(name = "renounceOwnership", abi = "renounceOwnership()")] + pub struct RenounceOwnershipCall; + ///Container type for all input parameters for the `transferOwnership` function with signature `transferOwnership(address)` and selector `0xf2fde38b` + #[derive( + Clone, + ::ethers::contract::EthCall, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethcall(name = "transferOwnership", abi = "transferOwnership(address)")] + pub struct 
TransferOwnershipCall { + pub new_owner: ::ethers::core::types::Address, + } + ///Container type for all input parameters for the `update` function with signature `update(((uint256,uint256,uint256,uint256),(uint256,uint256),bool)[],((uint256,uint256,uint256,uint256),(uint256,uint256),bool)[])` and selector `0xa8a0ea5c` + #[derive( + Clone, + ::ethers::contract::EthCall, + ::ethers::contract::EthDisplay, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + #[ethcall( + name = "update", + abi = "update(((uint256,uint256,uint256,uint256),(uint256,uint256),bool)[],((uint256,uint256,uint256,uint256),(uint256,uint256),bool)[])" + )] + pub struct UpdateCall { + pub stakers_to_remove: ::std::vec::Vec, + pub new_stakers: ::std::vec::Vec, + } + ///Container type for all of the contract's call + #[derive( + Clone, + ::ethers::contract::EthAbiType, + serde::Serialize, + serde::Deserialize, + Debug, + PartialEq, + Eq, + Hash, + )] + pub enum PermissionedStakeTableCalls { + HashBlsKey(HashBlsKeyCall), + IsStaker(IsStakerCall), + Owner(OwnerCall), + RenounceOwnership(RenounceOwnershipCall), + TransferOwnership(TransferOwnershipCall), + Update(UpdateCall), + } + impl ::ethers::core::abi::AbiDecode for PermissionedStakeTableCalls { + fn decode( + data: impl AsRef<[u8]>, + ) -> ::core::result::Result { + let data = data.as_ref(); + if let Ok(decoded) = ::decode(data) { + return Ok(Self::HashBlsKey(decoded)); + } + if let Ok(decoded) = ::decode(data) { + return Ok(Self::IsStaker(decoded)); + } + if let Ok(decoded) = ::decode(data) { + return Ok(Self::Owner(decoded)); + } + if let Ok(decoded) = + ::decode(data) + { + return Ok(Self::RenounceOwnership(decoded)); + } + if let Ok(decoded) = + ::decode(data) + { + return Ok(Self::TransferOwnership(decoded)); + } + if let Ok(decoded) = ::decode(data) { + return Ok(Self::Update(decoded)); + } + Err(::ethers::core::abi::Error::InvalidData.into()) + } + } + impl ::ethers::core::abi::AbiEncode for PermissionedStakeTableCalls { + fn encode(self) -> Vec { + match self { + Self::HashBlsKey(element) => ::ethers::core::abi::AbiEncode::encode(element), + Self::IsStaker(element) => ::ethers::core::abi::AbiEncode::encode(element), + Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), + Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), + Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), + Self::Update(element) => ::ethers::core::abi::AbiEncode::encode(element), + } + } + } + impl ::core::fmt::Display for PermissionedStakeTableCalls { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::HashBlsKey(element) => ::core::fmt::Display::fmt(element, f), + Self::IsStaker(element) => ::core::fmt::Display::fmt(element, f), + Self::Owner(element) => ::core::fmt::Display::fmt(element, f), + Self::RenounceOwnership(element) => ::core::fmt::Display::fmt(element, f), + Self::TransferOwnership(element) => ::core::fmt::Display::fmt(element, f), + Self::Update(element) => ::core::fmt::Display::fmt(element, f), + } + } + } + impl ::core::convert::From for PermissionedStakeTableCalls { + fn from(value: HashBlsKeyCall) -> Self { + Self::HashBlsKey(value) + } + } + impl ::core::convert::From for PermissionedStakeTableCalls { + fn from(value: IsStakerCall) -> Self { + Self::IsStaker(value) + } + } + impl ::core::convert::From for PermissionedStakeTableCalls { + fn from(value: OwnerCall) -> Self { + Self::Owner(value) + } 
+ } + impl ::core::convert::From for PermissionedStakeTableCalls { + fn from(value: RenounceOwnershipCall) -> Self { + Self::RenounceOwnership(value) + } + } + impl ::core::convert::From for PermissionedStakeTableCalls { + fn from(value: TransferOwnershipCall) -> Self { + Self::TransferOwnership(value) + } + } + impl ::core::convert::From for PermissionedStakeTableCalls { + fn from(value: UpdateCall) -> Self { + Self::Update(value) + } + } + ///Container type for all return fields from the `_hashBlsKey` function with signature `_hashBlsKey((uint256,uint256,uint256,uint256))` and selector `0x9b30a5e6` + #[derive( + Clone, + ::ethers::contract::EthAbiType, + ::ethers::contract::EthAbiCodec, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + pub struct HashBlsKeyReturn(pub [u8; 32]); + ///Container type for all return fields from the `isStaker` function with signature `isStaker((uint256,uint256,uint256,uint256))` and selector `0x75d705e9` + #[derive( + Clone, + ::ethers::contract::EthAbiType, + ::ethers::contract::EthAbiCodec, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + pub struct IsStakerReturn(pub bool); + ///Container type for all return fields from the `owner` function with signature `owner()` and selector `0x8da5cb5b` + #[derive( + Clone, + ::ethers::contract::EthAbiType, + ::ethers::contract::EthAbiCodec, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + pub struct OwnerReturn(pub ::ethers::core::types::Address); + ///`G2Point(uint256,uint256,uint256,uint256)` + #[derive( + Clone, + ::ethers::contract::EthAbiType, + ::ethers::contract::EthAbiCodec, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + pub struct G2Point { + pub x_0: ::ethers::core::types::U256, + pub x_1: ::ethers::core::types::U256, + pub y_0: ::ethers::core::types::U256, + pub y_1: ::ethers::core::types::U256, + } + ///`EdOnBN254Point(uint256,uint256)` + #[derive( + Clone, + ::ethers::contract::EthAbiType, + ::ethers::contract::EthAbiCodec, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + pub struct EdOnBN254Point { + pub x: ::ethers::core::types::U256, + pub y: ::ethers::core::types::U256, + } + ///`NodeInfo((uint256,uint256,uint256,uint256),(uint256,uint256),bool)` + #[derive( + Clone, + ::ethers::contract::EthAbiType, + ::ethers::contract::EthAbiCodec, + serde::Serialize, + serde::Deserialize, + Default, + Debug, + PartialEq, + Eq, + Hash, + )] + pub struct NodeInfo { + pub bls_vk: G2Point, + pub schnorr_vk: EdOnBN254Point, + pub is_da: bool, + } +} diff --git a/contracts/rust/adapter/Cargo.toml b/contracts/rust/adapter/Cargo.toml index 88e7e6c1b2..edb5f6e343 100644 --- a/contracts/rust/adapter/Cargo.toml +++ b/contracts/rust/adapter/Cargo.toml @@ -8,11 +8,14 @@ edition = { workspace = true } [dependencies] anyhow = { workspace = true } ark-bn254 = { workspace = true } +ark-ec = { workspace = true } +ark-ed-on-bn254 = { workspace = true } ark-ff = { workspace = true } ark-poly = { workspace = true } ark-serialize = { workspace = true } ark-std = { workspace = true } contract-bindings = { path = "../../../contract-bindings" } +derive_more = { workspace = true } diff-test-bn254 = { git = "https://github.com/EspressoSystems/solidity-bn254.git" } ethers = { version = "2.0.4" } hotshot-types = { workspace = true } @@ -22,6 +25,7 @@ jf-utils = { workspace = true } libp2p = { workspace = true, 
features = ["serde"] } num-bigint = { version = "0.4", default-features = false } num-traits = { version = "0.2", default-features = false } +serde = { workspace = true } [[bin]] name = "eval-domain" diff --git a/contracts/rust/adapter/src/jellyfish.rs b/contracts/rust/adapter/src/jellyfish.rs index 85ef13fa63..b0031c8aaf 100644 --- a/contracts/rust/adapter/src/jellyfish.rs +++ b/contracts/rust/adapter/src/jellyfish.rs @@ -176,7 +176,7 @@ impl From> for ParsedVerifyingKey { // ```rust // let srs = ark_srs::kzg10::aztec20::setup(2u64.pow(6) as usize + 2).expect("Aztec SRS fail to load"); // println!("{}", hex::encode(jf_utils::to_bytes!(&srs.beta_h).unwrap())); - // ```` + // ``` assert_eq!( g2_lsb.encode_hex::(), String::from("b0838893ec1f237e8b07323b0744599f4e97b598b3b589bcc2bc37b8d5c41801") diff --git a/contracts/rust/adapter/src/lib.rs b/contracts/rust/adapter/src/lib.rs index b7cd11b355..8347358d4a 100644 --- a/contracts/rust/adapter/src/lib.rs +++ b/contracts/rust/adapter/src/lib.rs @@ -2,6 +2,7 @@ pub mod jellyfish; pub mod light_client; +pub mod stake_table; // Archived, legacy helpers and tests, to be removed soon. not included, reference/read only // mod archived diff --git a/contracts/rust/adapter/src/stake_table.rs b/contracts/rust/adapter/src/stake_table.rs new file mode 100644 index 0000000000..fa7acf6dd2 --- /dev/null +++ b/contracts/rust/adapter/src/stake_table.rs @@ -0,0 +1,224 @@ +use crate::jellyfish::u256_to_field; +use ark_ec::{ + short_weierstrass, + twisted_edwards::{self, Affine, TECurveConfig}, + AffineRepr, +}; +use ark_ed_on_bn254::EdwardsConfig; +use ark_ff::{BigInteger, PrimeField}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use contract_bindings::permissioned_stake_table::{self, EdOnBN254Point, NodeInfo}; +use diff_test_bn254::ParsedG2Point; +use ethers::{ + abi::AbiDecode, + prelude::{AbiError, EthAbiCodec, EthAbiType}, + types::U256, +}; +use hotshot_types::{light_client::StateVerKey, network::PeerConfigKeys, signature_key::BLSPubKey}; +use serde::{Deserialize, Serialize}; +use std::str::FromStr; + +// TODO: (alex) maybe move these commonly shared util to a crate +/// convert a field element to U256, panic if field size is larger than 256 bit +pub fn field_to_u256(f: F) -> U256 { + if F::MODULUS_BIT_SIZE > 256 { + panic!("Shouldn't convert a >256-bit field to U256"); + } + U256::from_little_endian(&f.into_bigint().to_bytes_le()) +} + +/// an intermediate representation of `EdOnBN254Point` in solidity. +#[derive(Clone, PartialEq, Eq, Debug, EthAbiType, EthAbiCodec)] +pub struct ParsedEdOnBN254Point { + /// x coordinate of affine repr + pub x: U256, + /// y coordinate of affine repr + pub y: U256, +} + +// this is convention from BN256 precompile +impl Default for ParsedEdOnBN254Point { + fn default() -> Self { + Self { + x: U256::from(0), + y: U256::from(0), + } + } +} + +impl From for EdOnBN254Point { + fn from(value: ParsedEdOnBN254Point) -> Self { + Self { + x: value.x, + y: value.y, + } + } +} + +impl From for ParsedEdOnBN254Point { + fn from(value: EdOnBN254Point) -> Self { + let EdOnBN254Point { x, y } = value; + Self { x, y } + } +} + +impl FromStr for ParsedEdOnBN254Point { + type Err = AbiError; + fn from_str(s: &str) -> Result { + let parsed: (Self,) = AbiDecode::decode_hex(s)?; + Ok(parsed.0) + } +} + +impl From> for ParsedEdOnBN254Point +where + P::BaseField: PrimeField, +{ + fn from(p: Affine
<P>
) -> Self { + if p.is_zero() { + // this convention is from the BN precompile + Self { + x: U256::from(0), + y: U256::from(0), + } + } else { + Self { + x: field_to_u256::(*p.x().unwrap()), + y: field_to_u256::(*p.y().unwrap()), + } + } + } +} + +impl From for Affine
<P>
+where + P::BaseField: PrimeField, +{ + fn from(p: ParsedEdOnBN254Point) -> Self { + if p == ParsedEdOnBN254Point::default() { + Self::default() + } else { + Self::new_unchecked( + u256_to_field::(p.x), + u256_to_field::(p.y), + ) + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct NodeInfoJf { + pub stake_table_key: BLSPubKey, + pub state_ver_key: StateVerKey, + pub da: bool, +} + +impl From for NodeInfo { + fn from(value: NodeInfoJf) -> Self { + let NodeInfoJf { + stake_table_key, + state_ver_key, + da, + } = value; + let ParsedG2Point { x0, x1, y0, y1 } = stake_table_key.to_affine().into(); + let schnorr: ParsedEdOnBN254Point = state_ver_key.to_affine().into(); + Self { + bls_vk: permissioned_stake_table::G2Point { + x_0: x0, + x_1: x1, + y_0: y0, + y_1: y1, + }, + schnorr_vk: schnorr.into(), + is_da: da, + } + } +} + +impl From for NodeInfoJf { + fn from(value: NodeInfo) -> Self { + let NodeInfo { + bls_vk, + schnorr_vk, + is_da, + } = value; + let stake_table_key = { + let g2 = diff_test_bn254::ParsedG2Point { + x0: bls_vk.x_0, + x1: bls_vk.x_1, + y0: bls_vk.y_0, + y1: bls_vk.y_1, + }; + let g2_affine = short_weierstrass::Affine::::from(g2); + let mut bytes = vec![]; + // TODO: remove serde round-trip once jellyfin provides a way to + // convert from Affine representation to VerKey. + // + // Serialization and de-serialization shouldn't fail. + g2_affine + .into_group() + .serialize_compressed(&mut bytes) + .unwrap(); + BLSPubKey::deserialize_compressed(&bytes[..]).unwrap() + }; + let state_ver_key = { + let g1_point: ParsedEdOnBN254Point = schnorr_vk.into(); + let state_sk_affine = twisted_edwards::Affine::::from(g1_point); + StateVerKey::from(state_sk_affine) + }; + Self { + stake_table_key, + state_ver_key, + da: is_da, + } + } +} + +impl From> for NodeInfoJf { + fn from(value: PeerConfigKeys) -> Self { + let PeerConfigKeys { + stake_table_key, + state_ver_key, + da, + .. + } = value; + Self { + stake_table_key, + state_ver_key, + da, + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use ark_std::rand::{Rng, RngCore}; + use hotshot_types::{light_client::StateKeyPair, traits::signature_key::BuilderSignatureKey}; + + impl NodeInfoJf { + fn random(rng: &mut impl RngCore) -> Self { + let mut seed = [0u8; 32]; + rng.fill_bytes(&mut seed); + + let (stake_table_key, _) = BLSPubKey::generated_from_seed_indexed(seed, 0); + let state_key_pair = StateKeyPair::generate_from_seed_indexed(seed, 0); + Self { + stake_table_key, + state_ver_key: state_key_pair.ver_key(), + da: rng.gen(), + } + } + } + + #[test] + fn test_node_info_round_trip() { + let mut rng = ark_std::rand::thread_rng(); + for _ in 0..20 { + let jf = NodeInfoJf::random(&mut rng); + let sol: NodeInfo = jf.clone().into(); + let jf2: NodeInfoJf = sol.into(); + assert_eq!(jf2, jf); + } + } +} diff --git a/contracts/script/multisigTransactionProposals/README.md b/contracts/script/multisigTransactionProposals/README.md index ddd2ea47a4..d1c248c4e4 100644 --- a/contracts/script/multisigTransactionProposals/README.md +++ b/contracts/script/multisigTransactionProposals/README.md @@ -33,7 +33,7 @@ source .env.contracts && \ ts-node contracts/script/multisigTransactionProposals/safeSDK/modifyProverModeProposal.ts setProver ``` -Open the the URL shown in the console to sign the transaction in the Safe UI. +Open the URL shown in the console to sign the transaction in the Safe UI. Once successful, all signers will see a transaction request on the SAFE UI e.g. 
`https://app.safe.global/transactions/queue?safe=$SAFE_MULTISIG_ADDRESS` @@ -84,7 +84,7 @@ source .env.contracts && \ ts-node contracts/script/multisigTransactionProposals/safeSDK/modifyStateHistoryRetentionPeriod.ts ``` -Open the the URL shown in the console to sign the transaction in the Safe UI. +Open the URL shown in the console to sign the transaction in the Safe UI. Once successful, all signers will see a transaction request on the SAFE UI e.g. `https://app.safe.global/transactions/queue?safe=$SAFE_MULTISIG_ADDRESS` diff --git a/contracts/src/PermissionedStakeTable.sol b/contracts/src/PermissionedStakeTable.sol new file mode 100644 index 0000000000..17c1341750 --- /dev/null +++ b/contracts/src/PermissionedStakeTable.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; +import { BN254 } from "bn254/BN254.sol"; +import { EdOnBN254 } from "./libraries/EdOnBn254.sol"; + +/** + * @title SimpleStakeTable + * @dev An stake table mapping with owner-only access control. + */ +contract PermissionedStakeTable is Ownable { + event StakersUpdated(NodeInfo[] removed, NodeInfo[] added); + + error StakerAlreadyExists(BN254.G2Point); + error StakerNotFound(BN254.G2Point); + + struct NodeInfo { + /// The consensus signing key + BN254.G2Point blsVK; + /// The consensus signing key. Only used for storage in this contract. + EdOnBN254.EdOnBN254Point schnorrVK; + /// Is the Node DA Node? Only used for storage in this contract. + bool isDA; + } + + // State mapping from staker IDs to their staking status + mapping(bytes32 nodeID => bool isStaker) private stakers; + + constructor(NodeInfo[] memory initialStakers) Ownable(msg.sender) { + insert(initialStakers); + } + + // public methods + + function update(NodeInfo[] memory stakersToRemove, NodeInfo[] memory newStakers) + public + onlyOwner + { + remove(stakersToRemove); + insert(newStakers); + emit StakersUpdated(stakersToRemove, newStakers); + } + + // internal methods + + function insert(NodeInfo[] memory newStakers) internal { + // TODO: revert if array empty + for (uint256 i = 0; i < newStakers.length; i++) { + bytes32 stakerID = _hashBlsKey(newStakers[i].blsVK); + if (stakers[stakerID]) { + revert StakerAlreadyExists(newStakers[i].blsVK); + } + stakers[stakerID] = true; + } + } + + function remove(NodeInfo[] memory stakersToRemove) internal { + // TODO: revert if array empty + for (uint256 i = 0; i < stakersToRemove.length; i++) { + bytes32 stakerID = _hashBlsKey(stakersToRemove[i].blsVK); + if (!stakers[stakerID]) { + revert StakerNotFound(stakersToRemove[i].blsVK); + } + stakers[stakerID] = false; + } + } + + // view methods + + function isStaker(BN254.G2Point memory staker) external view returns (bool) { + return stakers[_hashBlsKey(staker)]; + } + + function _hashBlsKey(BN254.G2Point memory blsVK) public pure returns (bytes32) { + return keccak256(abi.encode(blsVK.x0, blsVK.x1, blsVK.y0, blsVK.y1)); + } +} diff --git a/contracts/test/PermissionedStakeTable.t.sol b/contracts/test/PermissionedStakeTable.t.sol new file mode 100644 index 0000000000..bd5fbb974a --- /dev/null +++ b/contracts/test/PermissionedStakeTable.t.sol @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "forge-std/Test.sol"; +import { PermissionedStakeTable } from "../src/PermissionedStakeTable.sol"; +import { EdOnBN254 } from "../src/libraries/EdOnBn254.sol"; +import { BN254 } from "bn254/BN254.sol"; +import { Ownable } from 
"@openzeppelin/contracts/access/Ownable.sol"; + +contract PermissionedStakeTableTest is Test { + PermissionedStakeTable stakeTable; + address owner = address(1); + + function setUp() public { + vm.prank(owner); + PermissionedStakeTable.NodeInfo[] memory initialStakers = nodes(0, 1); + stakeTable = new PermissionedStakeTable(initialStakers); + } + + // Create `numNodes` node IDs from `start` for testing. + function nodes(uint64 start, uint64 numNodes) + private + returns (PermissionedStakeTable.NodeInfo[] memory) + { + string[] memory cmds = new string[](3); + cmds[0] = "diff-test"; + cmds[1] = "gen-random-g2-point"; + + PermissionedStakeTable.NodeInfo[] memory ps = + new PermissionedStakeTable.NodeInfo[](numNodes); + + for (uint64 i = 0; i < numNodes; i++) { + cmds[2] = vm.toString(start + 1 + i); + bytes memory result = vm.ffi(cmds); + BN254.G2Point memory bls = abi.decode(result, (BN254.G2Point)); + ps[i] = PermissionedStakeTable.NodeInfo(bls, EdOnBN254.EdOnBN254Point(0, 1), true); + } + return ps; + } + + function testInsert() public { + vm.prank(owner); + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 1); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + + vm.expectEmit(); + emit PermissionedStakeTable.StakersUpdated(empty, stakers); + + stakeTable.update(empty, stakers); + + assertTrue(stakeTable.isStaker(stakers[0].blsVK)); + } + + function testInsertMany() public { + vm.prank(owner); + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 10); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + + vm.expectEmit(); + emit PermissionedStakeTable.StakersUpdated(empty, stakers); + + stakeTable.update(empty, stakers); + + assertTrue(stakeTable.isStaker(stakers[0].blsVK)); + } + + function testInsertRevertsIfStakerExists() public { + vm.prank(owner); + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 1); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + stakeTable.update(empty, stakers); + + // Try adding the same staker again + vm.expectRevert( + abi.encodeWithSelector( + PermissionedStakeTable.StakerAlreadyExists.selector, stakers[0].blsVK + ) + ); + vm.prank(owner); + stakeTable.update(empty, stakers); + } + + function testRemove() public { + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 1); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + vm.prank(owner); + stakeTable.update(empty, stakers); + + vm.prank(owner); + + vm.expectEmit(); + emit PermissionedStakeTable.StakersUpdated(stakers, empty); + + stakeTable.update(stakers, empty); + + assertFalse(stakeTable.isStaker(stakers[0].blsVK)); + } + + function testRemoveRevertsIfStakerNotFound() public { + vm.prank(owner); + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 1); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + vm.expectRevert( + abi.encodeWithSelector(PermissionedStakeTable.StakerNotFound.selector, stakers[0].blsVK) + ); + // Attempt to remove a non-existent staker + stakeTable.update(stakers, empty); + } + + function testNonOwnerCannotInsert() public { + vm.prank(address(2)); + vm.expectRevert( + abi.encodeWithSelector(Ownable.OwnableUnauthorizedAccount.selector, address(2)) + ); + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 1); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + stakeTable.update(empty, stakers); + } + + function testNonOwnerCannotRemove() public { + vm.prank(address(2)); + vm.expectRevert( + 
abi.encodeWithSelector(Ownable.OwnableUnauthorizedAccount.selector, address(2)) + ); + PermissionedStakeTable.NodeInfo[] memory stakers = nodes(1, 1); + PermissionedStakeTable.NodeInfo[] memory empty = nodes(1, 0); + stakeTable.update(stakers, empty); + } +} diff --git a/data/chain_config.bin b/data/chain_config.bin deleted file mode 100644 index 1888124061..0000000000 Binary files a/data/chain_config.bin and /dev/null differ diff --git a/data/chain_config.json b/data/chain_config.json deleted file mode 100644 index 4f0650f18e..0000000000 --- a/data/chain_config.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "base_fee": "0", - "chain_id": "35353", - "fee_contract": "0x0000000000000000000000000000000000000000", - "fee_recipient": "0x0000000000000000000000000000000000000000", - "max_block_size": "10240" -} diff --git a/data/fee_info.bin b/data/fee_info.bin deleted file mode 100644 index debddd9d05..0000000000 Binary files a/data/fee_info.bin and /dev/null differ diff --git a/data/fee_info.json b/data/fee_info.json deleted file mode 100644 index ae0867bb1a..0000000000 --- a/data/fee_info.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "account": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", - "amount": "0" -} diff --git a/data/header.bin b/data/header.bin deleted file mode 100644 index 1c3990498c..0000000000 Binary files a/data/header.bin and /dev/null differ diff --git a/data/initial_stake_table.toml b/data/initial_stake_table.toml new file mode 100644 index 0000000000..59384a6509 --- /dev/null +++ b/data/initial_stake_table.toml @@ -0,0 +1,30 @@ +[[public_keys]] +stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" +state_ver_key = "SCHNORR_VER_KEY~ibJCbfPOhDoURqiGLe683TDJ_KOLQCx8_Hdq43dOviSuL6WJJ_2mARKO3xA2k5zpXE3iiq4_z7mzvA-V1VXvIWw" +da = true +stake = 1 + +[[public_keys]] +stake_table_key = "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR" +state_ver_key = "SCHNORR_VER_KEY~lNCMqH5qLthH5OXxW_Z25tLXJUqmzzhsuQ6oVuaPWhtRPmgIKSqcBoJTaEbmGZL2VfTyQNguaoQL4U_4tCA_HmI" +da = true +stake = 1 + +[[public_keys]] +stake_table_key = "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4" +state_ver_key = "SCHNORR_VER_KEY~nkFKzpLhJAafJ3LBkY_0h9OzxSyTu95Z029EUFPO4QNkeUo6DHQGTTVjxmprTA5H8jRSn73i0slJvig6dZ5kLX4" +da = true +stake = 1 + +[[public_keys]] +stake_table_key = "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL" +state_ver_key = "SCHNORR_VER_KEY~NwYhzlWarlZHxTNvChWuf74O3fP7zIt5NdC7V8gV6w2W92JOBDkrNmKQeMGxMUke-G5HHxUjHlZEWr1m1xLjEaI" +da = false +stake = 1 + + +[[public_keys]] +stake_table_key = "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7" +state_ver_key = "SCHNORR_VER_KEY~qMfMj1c1hRVTnugvz3MKNnVC5JA9jvZcV3ZCLL_J4Ap-u0i6ulGWveTk3OOelZj2-kd_WD5ojtYGWV1jHx9wCaA" +da = true +stake = 1 \ No newline at end of file diff --git a/data/l1_block.bin b/data/l1_block.bin deleted file mode 100644 index ecffa9fdcf..0000000000 Binary files 
a/data/l1_block.bin and /dev/null differ diff --git a/data/messages.bin b/data/messages.bin deleted file mode 100644 index a3962ad3d4..0000000000 Binary files a/data/messages.bin and /dev/null differ diff --git a/data/messages.json b/data/messages.json deleted file mode 100644 index 3a5a376864..0000000000 --- a/data/messages.json +++ /dev/null @@ -1,427 +0,0 @@ -[ - { - "kind": { - "Consensus": { - "General": { - "Proposal": { - "_pd": null, - "data": { - "block_header": { - "block_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAQA", - "builder_commitment": "BUILDER_COMMITMENT~tEvs0rxqOiMCvfe2R0omNNaphSlUiEDrb2q0IZpRcgA_", - "builder_signature": null, - "chain_config": { - "chain_config": { - "Left": { - "base_fee": "0", - "chain_id": "35353", - "fee_contract": null, - "fee_recipient": "0x0000000000000000000000000000000000000000", - "max_block_size": "10240" - } - } - }, - "fee_info": { - "account": "0x0000000000000000000000000000000000000000", - "amount": "0" - }, - "fee_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAKA", - "height": 0, - "l1_finalized": null, - "l1_head": 0, - "ns_table": { - "bytes": "AAAAAA==" - }, - "payload_commitment": "HASH~AazstQer_ho1SqgGT0r10_Gs0BnjfbPBHJdSO3HHbp29", - "timestamp": 0 - }, - "justify_qc": { - "_pd": null, - "data": { - "leaf_commit": "COMMIT~eaBGKF8-lw-t211wxLq7tcXEEqacbvDVcFFR9aCddO3G" - }, - "signatures": null, - "view_number": 0, - "vote_commitment": "COMMIT~0-ZxNgMSsUEPeGDdq5-TZE8PDsYwgI4O2fVGMKooP87D" - }, - "proposal_certificate": { - "Timeout": { - "_pd": null, - "data": { - "view": 0 - }, - "signatures": null, - "view_number": 0, - "vote_commitment": "COMMIT~TZG1F34lxU6Ny9aKQMkjZAxjW9zotdwW75EHEGbyALOi" - } - }, - "upgrade_certificate": { - "_pd": null, - "data": { - "decide_by": 0, - "new_version": { - "major": 1, - "minor": 0 - }, - "new_version_first_view": 0, - "new_version_hash": [], - "old_version": { - "major": 0, - "minor": 1 - }, - "old_version_last_view": 0 - }, - "signatures": null, - "view_number": 0, - "vote_commitment": "COMMIT~roiQgLLeI4uYqYxjz-0jPwtLyNhSlnfriPEVZ-_4RUrW" - }, - "view_number": 0 - }, - "signature": "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "Vote": { - "data": { - "leaf_commit": "COMMIT~eaBGKF8-lw-t211wxLq7tcXEEqacbvDVcFFR9aCddO3G" - }, - "signature": [ - "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "ViewSyncPreCommitVote": { - "data": { - "relay": 0, - "round": 0 - }, - "signature": [ - 
"BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "ViewSyncCommitVote": { - "data": { - "relay": 0, - "round": 0 - }, - "signature": [ - "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "ViewSyncFinalizeVote": { - "data": { - "relay": 0, - "round": 0 - }, - "signature": [ - "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "ViewSyncPreCommitCertificate": { - "_pd": null, - "data": { - "relay": 0, - "round": 0 - }, - "signatures": null, - "view_number": 0, - "vote_commitment": "COMMIT~OQecZxfFpuEuPJgkpsQoglnqY0fm6Qi1PUarYCgiFQ0T" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "ViewSyncCommitCertificate": { - "_pd": null, - "data": { - "relay": 0, - "round": 0 - }, - "signatures": null, - "view_number": 0, - "vote_commitment": "COMMIT~POgBCaDjtUV3Il5-FXVr5KN2KzYSgipfKX6Ci0-nxduO" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "ViewSyncFinalizeCertificate": { - "_pd": null, - "data": { - "relay": 0, - "round": 0 - }, - "signatures": null, - "view_number": 0, - "vote_commitment": "COMMIT~s5i9wmQWH7VU90CUiEWRdAG19LI1iXydSMxp9gZ7kHco" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "TimeoutVote": { - "data": { - "view": 0 - }, - "signature": [ - 
"BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "UpgradeProposal": { - "_pd": null, - "data": { - "upgrade_proposal": { - "decide_by": 0, - "new_version": { - "major": 1, - "minor": 0 - }, - "new_version_first_view": 0, - "new_version_hash": [], - "old_version": { - "major": 0, - "minor": 1 - }, - "old_version_last_view": 0 - }, - "view_number": 0 - }, - "signature": "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "General": { - "UpgradeVote": { - "data": { - "decide_by": 0, - "new_version": { - "major": 1, - "minor": 0 - }, - "new_version_first_view": 0, - "new_version_hash": [], - "old_version": { - "major": 0, - "minor": 1 - }, - "old_version_last_view": 0 - }, - "signature": [ - "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "Da": { - "DaProposal": { - "_pd": null, - "data": { - "encoded_transactions": [ - 1, - 0, - 0, - 0, - 3, - 0, - 0, - 0, - 1, - 2, - 3 - ], - "metadata": { - "bytes": "AQAAAAEAAAALAAAA" - }, - "view_number": 0 - }, - "signature": "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "Da": { - "DaVote": { - "data": { - "payload_commit": "HASH~AazstQer_ho1SqgGT0r10_Gs0BnjfbPBHJdSO3HHbp29" - }, - "signature": [ - "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - ], - "view_number": 0 - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "Da": { - "DaCertificate": { - "_pd": null, - "data": { - "payload_commit": "HASH~AazstQer_ho1SqgGT0r10_Gs0BnjfbPBHJdSO3HHbp29" - }, - "signatures": null, - "view_number": 
0, - "vote_commitment": "COMMIT~5E3F3rC4E9DBhMBqOTmjZ9tjX4VFaV5gqrZJl0y6V05D" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Consensus": { - "Da": { - "VidDisperseMsg": { - "_pd": null, - "data": { - "common": { - "all_evals_digest": "FIELD~rF4TMFZMXJCieDeov31aNuDG5nDGR-iQdteEgBjXkErn", - "multiplicity": 1, - "num_storage_nodes": 1, - "payload_byte_len": 11, - "poly_commits": "FIELD~AQAAAAAAAAD2xsICwO-z0CXx_ucl0FV1j-zJ3tgPO-OL8gYLvXkIkNE" - }, - "payload_commitment": "HASH~Z03vXeC1EEaBGf5iacsBEWYiA7PHi3K6uS9gVpmlUx3t", - "recipient_key": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U", - "share": { - "aggregate_proofs": "FIELD~AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQGY", - "evals": "FIELD~AQAAAAAAAAABAAAAAwAAAAECAwAAAAAAAAAAAAAAAAAAAAAAAAAAAMk", - "evals_proof": { - "pos": "FIELD~AAAAAAAAAAD7", - "proof": [ - { - "Leaf": { - "elem": "FIELD~AQAAAAAAAAABAAAAAwAAAAECAwAAAAAAAAAAAAAAAAAAAAAAAAAAAMk", - "pos": "FIELD~AAAAAAAAAAD7", - "value": "FIELD~rF4TMFZMXJCieDeov31aNuDG5nDGR-iQdteEgBjXkErn" - } - } - ] - }, - "index": 0 - }, - "view_number": 0 - }, - "signature": "BLS_SIG~g3CUcLMD7fnDsBhItKvSqXLwEqdWfvusSrgpL1GBAxf-SWFW0t32Agt2jrOiempjjpI7dBwYGgXv-0mvI4sGEEE" - } - } - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - }, - { - "kind": { - "Data": { - "SubmitTransaction": [ - { - "namespace": 1, - "payload": "AQID" - }, - 0 - ] - } - }, - "sender": "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" - } -] \ No newline at end of file diff --git a/data/ns_table.bin b/data/ns_table.bin deleted file mode 100644 index 02a363d7cc..0000000000 Binary files a/data/ns_table.bin and /dev/null differ diff --git a/data/payload.bin b/data/payload.bin deleted file mode 100644 index 806d83c2a3..0000000000 Binary files a/data/payload.bin and /dev/null differ diff --git a/data/payload.json b/data/payload.json deleted file mode 100644 index 2387c7aa4b..0000000000 --- a/data/payload.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "ns_table": { - "bytes": "AwAAAO7/wAAcBgAAobC5EkAOAABksAWiXBQAAA==" - }, - "raw_payload": 
"BgAAAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAF2eIXFMYkyrHrYISOdpUPxLq2dgmfDilwq9z71rSHb2xcbmS2nim/Ak33mSXGqypeXatObNfgf921YXBvra9Z/oPCq6AEfRmM5J+ZKvcX4URjpPP/AvIPcF3PmLmF/kgyq1cPnT/lQPW5Z9Nb4ka3C1IqLMBqqmNxca+jwWrXOXtaj/VTrLdtRleItPSrcC9ShFVqMk4NJrFe9i6LiuDYom7JdoIeitb+le/x4cl3koNvqFcmVY3013dIXijRwffVUQHWATDTBLMJRTtTiA9gTg+Dl4gsboJzOzlKjaqEgNUouP/E/YMHdTVgUvz3TyvPOCxePq9xg0yeNT8BDKA+kPUd5oezPOoFRQum/hkAidqKl453/BTMpijENApkbiy/s0FV7kT2CyBp+kMHNdVO7IK4JyCO4Mj1HBxtcFaNjhXNg7QAXB0c8t/9E4DhvDU1YX9UB8mSqCRrq35BXOFHRDmMFaXj/WWWMZkeUHeAQZF7Y8k4cHnImIhdZg532eg6QcPNwuHgCdLQOnoZteoZWKoMloJvpS1xcRusvh8Y4wV4L1eURLQLq8eva5UTFs6LzRyPQZ6EOlZvrMTlunRwCGBT5lGiyX3wWGY2m2JWfhR+rlccHcUlwb43d40VkXY3oVg4eR4nWS9eBaqkjHXYuQaX0INhMYV+byHlUn7wJkqiHjp3hY0xts2yr/mjCkUeScU9vG7C+x5lg8r+RDFox1OaF7s9/jYcDEXLZzfyrX5iE8x9ot6A+E6W5rp1Ek/ymkRvK3rr7DmTvGrRpgNfSndlb2FsH7Vu1lnYsE/mNEMwwbhjTrRI1qoWRNdkrJYUZsTOw4NM6SfN0wvZ4PVtTlFb97KvWd5b/y5f9WoGEbqunvtky528CtZyn3e44a0Rd9ECWhYifqb9L7R+D8yGm8NePNpyiAx2TdlX4Su4eN/VeHV04moVpH8ygXKKhMNX2KpDqu7sVB4T5/wUZNOCyvdzoQsZ7ot8JVOol4qLvegxsm/VbK4tQKmzeC/2tXleHPHSl8Nxsd+chgyAGJTuAH8EdCO3vJHSf7BqWzcDAeyz7tWe2sDkW0XJAco+uepk6bISMHItKAdO9tsaf+XMOdrg2lPghOYD2baTpvaF9d9jFZL+P5T+2LwRbKkID3WSC2vt7Ipq5KKsrMoGAzZO+AMOIQJQbGhta8YHLaz7hWrrpfzThxipZnBaGmjeKUMP7aJD4pP0uA5VFbtLYuiXFFh86rq0ovBZFMIe1d99EmksRPhFS1TwG2BjBkvMCjNI7eHYU1/yXvB35UfCNr8hEiaMvfOZ4ZUIzoBymPQtTIhaW/vPGR/4cDt0anBEZ8/4tdyaswCW9tTzfecNzzlt37ymGIDDGsH5UAlM4Th5TDYi+G5rHreV6cwIkaKKwTkVmAwG1eG51EYyO9NKdV1ikjdemEonxX9E4h3ZYZziwlNWCigg5eeo96cqMWL562APWSiyp0ARGlKGLUbrTLeQ8vuEbOOZqiOffH7Lnp24hDGhuUiPeARi15iru4e7YMPjpLD3D38jZHh+wo1BcdA2LWsOGCstaPKaQIgdyDD2xt846nIv6g4ozCsPmVXIBjJ46xvZkFbmmKFXQ0ZfopR2CGqAkAqbo/2EklZzo0kPQNlqDb2gDGafEDC4ND+mBTilxklxx354LlwTO/70S/CJrIs2kQgBPwyblcOguKfJpSLE0aYScDmLbqqIXWJTI6cmmKUf+RvYb+dhp9HGXSVkUpilYqkzRNz+NEwVTH9Afqh3PguK1YEXKINBwm0FMEstgeulTBCErnGYB2+XUdT4tKvC3XnQdsFthPW4VJ0Z8Wo1zt3uOlktbdUePvXLkUbWvaWJppmFidgFy1XBfD5mUyHMuaO7OsXMZHgCcAdx0OhqNAGKhLeN14LeiA8LaBJEV6SGqLy0fh5Vj501jGT+Z8PJ7MKZ38o1q+zUY+q/NnbRnRROTZZCSSAjbo+VosW3arhJeNeFHlABDaalv1GxCY3KIq5/nPK6sPBkIRIIumGXMYDJ9N+vF7rlgeCB+VgQgAAAAAAQAAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAM9kwJjeoi0EAJ6Ut9v7a/RueDt+T9TdqgR8o/fpx1WCUgOMGSTcPkOnP2NkuC2GLzV+zEIah3u4xPLjQ1lRZHr2yTAf2B3rtETPpqvYr065U/AobcXdWD3vdMivApM/emVjgu7qFboSXYCBPmr2qqCZndfCcilnWV9sAgns2CfpFFNcWxr6O0tvSChdJC0As3r1Edye2ep7erYCjTZlPFaSykV/QY+A60JWHr81x+0l8/sP/AM0X7uZPh8JSfuPPJJ3Shz8E0W0DHyKcJInJkus4JGGOSIF3xU2OtZTH2YDvpZmkhFKVTAiig7YzN5DAv+aRsmOiNVCbu4jDm6nMqkMjyEolLrO7aPLZUtXr6O8BAUCprmDe0o0TdCR+8pWgGLDQyf3z7Yvo4n7+oNS8j+D0R2zBqyToAnMwOkAecJYvUr27OlzXvEcmAGStoz1A+OngS+3gHIwAMQOrDjwiUra5NausPZ7CbfcuY3FBY8pFrBY/4IkTKql55InYmQTFhQHj5YLFbzHbdZOJpYybVk0OkntRe6OEEQVuo8ivUVHr3lmABW4LT+Xpceqie5i6dqh87b+TqqnNcyEUo66TFOPJWRjVeC8v/Hoy05WnM098DsBMjGvmrFe/9JNQowEnh1XzpjiUHGuHpJPlAo+221hkTaXRve2th1mXmFSzy5fVziicoKc0YVOM3qEmyI2u/ZFXYaGCmWeigQ0mq3JJuKGZXrcfxTivGHXoa4VwELGpxero1XnEwO11rggj/wg8N8jh1UHjdevZiHJOZUuGuve11dSqe5KUdG6ty0exPWWdMiohVJ+coEz98GheJDDGjBFOcF/q1rTcNWRVURpeDx8oxkQ3/E8fwxD76lG8snJjNip/CcFDiDI8orkn6cQsnMybACRf0kKzAAnitxWIuzuvVB99tJ/iDUD+U5fplD5w2ER2Q0kRVLYxEjwdNr/5nzJjHoVehLYupVYvT1hMF+8iQQJfyex4ZeSUeD48p7UEy9IG9JolNVKIH+YpGfR5WU/nfWbieWQtUR+DGGv6CQB20Pztd20I4z/+NcWnFOddFRT51GK8T++Ot7h5jKsaEReI5kKChnPUwA0ZB8GGmKfTC1UOUGESv2l+r5pfSp/st+mT0s9iLcIiBqxzazIlcsAFG/RjpS+UXTC0Vn6ut37XtxSGprSYc4zByNOHW40Wl27pErIEmpivjBgTuI1ta02YAlUcN/AMIDxBq9KcsZMCHnBb11fCU7s+EZCgKWPzXnpwbstf+zMOtMiwwJdiEKq//xbmrK3/JFuXfaomDzQJXsq4OIvGuDZuV3TcXn925y4sqvqwhjGzDZNi4JBzc4o3+4GiyrF6Lz6k0A7G6Ivvb4ZawTJ4IKO99+1rgTKrCrnlTU7DbZssIxSnwH2qW48y+FirOUYH9
jwn4gfI6KYAbOmjejZsU7X44sHaGEzNjqXZ3Vb57gUW7vK/cSsuKXDnB8q5bCiudg9BuLz1cuPwAQ4d7NRRJujYeAF+H8Upsmer6sgfl5SvWLzxxkpkz4NcMdH5IJ8Ll9UwoD3/14DOz6EX2drBl075iejXBE0ViBty/CSt0FD1+3tvJbObV3dqq4PrGyEDdp1rg8pVrTtCq6BNEEWM/9n1WRPYFBI6nKMIyqXslNNssZJ4Eya1bwXPxVJxTevGKbsbKFAwFiLq0JzQe2BecfHKGpmvZAjJ54vXwaRVEmbuE6Sw47ec0s4jLAqZ8FMWcxCMho6vcINVeJFll0UZLZSTd8u67tBsHCrXsa54fwwTZ/tL2IayrBa2+y4zgDU6mf/PXBDxn5rUHcqK/LUwxdOvVvFst/HImU0ty8Y+F8eHkTocENVRVTNxPJ2UGQX8DXguv33WQ+kNjO7vY8V18BPTWZYC4Jf3GY2FvAGGr6jGbrAD9/Wb7zetKhS6zrbOnz2KOSJabYRpaNj85+5mALigd9unzZd2evLiX9CZCu/cVwZKoQZw6L4tKYwx5tA9M3qcKvZMNpDxURGStPwpiXnFiAa0+kZ+ugMH9IiEWQyCEInXBMUskVu7+rVs6HqFarEcQGhi3VSxl7S2/PG6K7mNzgBZzgWMeXpAixkWd6zjYESBLsFe6GsYHQjYNq4z0Jjv5Rjx3IOgtbNUZehBkcI+zmpbqf3tv1odmHtxpL/hVgBG2NvWdE+q2OtDaz3JOVZ27Pf6kf0glHhnaPpDDfbjfTygd7B4LFli5k+ED2bMc5264Oa+N2yWRLqhorqibx0L2Y/KzjOVh+qCinziGmfIGVMEhiMJB/SaAw4PMVg7dR+N0C4bHHJBQS8MPv6aydBmPYm9j9kh8ee6pFYqvXgwdXebkJS8j1D1vHb9g3ndhsr4J6x+u0GrlHz1J4cyivM4HG1GqixkW2pCiB3Fh4dEIUeJsUemr0hB9dMdbOC0WBn42Xc6v8MP9gnIrHhCueK7CleMPMjS38E5MgVas0GerM4cGLMBxvjL30zdcBAeMZdRaPR8WQbw5B4jwY93C6LKcxO+gWix02aa0i5ZDcHZY3IjZV7fxy3vvtEs/qxXMto8gLK8I7kjDAblmAIV5yleBUgQ8pzw6BkqbK2/AA6cJcAg7gJUVAA6wT9fsrAUgiMcWuf1+DSSVrBK/FXa7AY5X/PGPaWeZdOCLs1ncGLrVmOSNCFfCWT/5UYvh952uFc+/SghFcdEyQ2bznu9S55FpNFVQ29Ia10aeYMffzonMx+eT01BgAAAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAKLQrjdgptCHUAMU4Epx9OxmKmeD0v9LZ4yGEf3K9lgBYxrATdQAzMHtZ9GmmubMEgpTgWew4elHoHC7Cw9H889c2Z86VqYBGQ+8xlYRQjy9W5IQtu5SddmMIgUzoBx2itmtCyrthgf79qAInSyQwaJrKiuRRq16wwwoa5j3YQr3AOfrjR+kglN/K8prXBIN8YYJxLi+ntqQ2B5mvIY+3yy+mDsrI2BvS9dcfaAPi+x7V1AgwzXpEvA5Hh/rp3e4WfbH5cUjdubwUr8Qx/a7RUb9jMDZvBm2aKuX5p0kVpntV9nVt2p/fKJ4sXqguB2dHhcU3o5OWMyelM/3qFkLAFKWhRA8XRm7jjQAN7B1+ZtmaPqQIsSza0ToJiYzUR3K/5yy52gfFTwa5G4dpFqRFz+GSTBn/ZiMXLHfZcJwa/ILkNRGSqEj6TFBXYJPaMhrTz6JeA43PcD2x8OF6vKgunyd+HPh3Vi1Mm5UY0q8HHoiZpJ5g+kKKT3dAxS2LekFg+fsDdphawltbsoCeIDHKmLIQ0OEHoB4Y13bb96RzZHFKMRUgGSommgmsEA/yoS20e4wWU1uEv7DcM4PQ75QsntW1296s4DBkHAV0JIe/zetUPnMK/pj4DTlluQR74BPvYmCd6PNdwm8Nw9Y+luvmNm63A3BemUg0yOJV+xC6Lp6i8NuBuwpS2ICoQW/aKNgnBrX0msaee9YhzKiV0gBBjCibHdJNFnWDUBwYJQZc4fdJtgE5gIyTi7UMjgQz65QKbIrCv+jtFmkBN89VflXl1616Cn/9AxGXABurhggbm7QajQpBjJtLaFmSJxHc6/39v+bWHfQFs7Xoi3hDFf1Lk6G+x+972g0ThDI1eZVYaRckd6u1Hg/M8f5GC/J9Ib4RTGHIgAbFfs/CRA7LcSiGY9PEZxSUhMMRhiexEw4x0bkeb8lrsy14aBsM45krvzGWBlJMKRGMtuvsBA3dU+GCjU9+savW/QEIjhm7sWfkvKLcqVreKl1HFHdsB/iKMfHU5Lfd917BM9ip0oOe8y4GLDCscgs/6lUTzO0g30OzZVvlsdOk2pSNRbrbrnhy6HCGsxPxAYuyIJ93ZmHaZ+hEiq1KgISHq7bIxV7ffUEZIZ3+RyTWsvJGB82C027X1NXcrLRtmfJ9L9oUask0rqNz7elKvd8FFET54rUaDOx/dorXjMwoWuxHnViT+jYwsSA+73kiOA5H0Q4y0XT6F58/wo7JSR3hS2PQNPtsuLgX4ZIadYn4njN3w7Uocuz48iw/JYLIM+AlrW16RgduDVr0YhVIcMVAeeeizNM/wkfulryWH5LvDqJ6oH6ir9b5NX5f82FPE7o2p7shS1I02wV3zPpoqcOn4C1z9LC1V0twA4xSRyQc0nmcWZMpIR3/IkSIMRdMa/OB2WsJza/u+DOzJ6HcDfLtqNNIpm/ksVqJiQWkl48B2rCv26KDkRwXFoLopuT2KRlB1kRFew7lNRaTQesbiGMrYFd98AHjDJzuA6197J5c7XgRA7EVaFK72Rbzsudh2oHun/ninbEHNDvEtYrBlZszVJ48a+EL8X/r8gMwoi7Wq33dhwFaDmIezr0/A9RjJELXTEtczYQwt3SP/vkSqjqwbC0LNJVsvoqf8QpkTmfBFyEtakDYgSqOqAei92d3d1yBIq+HFQu4wy83N25zw3fSMJXJixQwp7QEIScYLxQP/9Q3NGiV1QtxGUMdu1bGVTvjcvmNx6sezGBjleuHUBYlF9+y8yVWc3dOyQ9EqEMomqZ4fkeBvNgElGjPTH2SA9nl0ZVKM4EgQ1cvRUYwBWtFYkrfcZquK/G6+/REOOB4Q+xIIcxbsrzCI0DYsFLllEVKIfWLbdJH2WwWINsP96rBhh3aLnTeM0srC3IuZfjI+wgpXnEyMqdWJ1rqjrkMBZHvjecJmWaUhFFmBE22RGJ544P1PsMp/pVEFPGMdb+sSuqhuA88XnuYKI8ENxLXXTHE/to1mQ9khDnpntjARASxeSAFmEddAmrfSoh9JV3P49YR8QBlWAyVPt/aw==" -} \ No newline at end of file diff --git a/data/transaction.bin b/data/transaction.bin deleted file mode 100644 index 9f086e3901..0000000000 Binary files 
a/data/transaction.bin and /dev/null differ diff --git a/data/tx_index.bin b/data/tx_index.bin deleted file mode 100644 index efdcb0dd86..0000000000 Binary files a/data/tx_index.bin and /dev/null differ diff --git a/data/tx_index.json b/data/tx_index.json deleted file mode 100644 index 0c24f617f1..0000000000 --- a/data/tx_index.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "ns_index": [ - 2, - 0, - 0, - 0 - ], - "tx_index": [ - 5, - 0, - 0, - 0 - ] -} \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml index 1e0fe5e8bf..b104120012 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -17,10 +17,11 @@ services: deploy-sequencer-contracts: image: ghcr.io/espressosystems/espresso-sequencer/deploy:main - command: deploy --only fee-contract + command: deploy --only fee-contract,permissioned-stake-table environment: - ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_DEPLOYER_ACCOUNT_INDEX - RUST_LOG - RUST_LOG_FORMAT @@ -35,6 +36,7 @@ services: environment: - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_URL - ESPRESSO_SEQUENCER_STAKE_TABLE_CAPACITY - ESPRESSO_SEQUENCER_PERMISSIONED_PROVER @@ -57,6 +59,7 @@ services: command: espresso-bridge deposit environment: - L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_PROVIDER + - L1_POLLING_INTERVAL=$ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_PROVIDER=http://sequencer1:$ESPRESSO_SEQUENCER_API_PORT - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC @@ -236,6 +239,7 @@ services: - ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=password - ESPRESSO_SEQUENCER_POSTGRES_DATABASE=sequencer - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_0 @@ -293,6 +297,7 @@ services: - ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=password - ESPRESSO_SEQUENCER_POSTGRES_DATABASE=sequencer - ESPRESSO_SEQUENCER_L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_WS_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_1 @@ -345,6 +350,7 @@ services: - ESPRESSO_SEQUENCER_MAX_CONNECTIONS - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer3:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_2 @@ -395,6 +401,7 @@ services: - ESPRESSO_SEQUENCER_MAX_CONNECTIONS - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer4:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_WS_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_3 @@ -446,6 +453,7 @@ services: - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer0:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_API_PEERS=http://sequencer0:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - 
ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_4 @@ -733,8 +741,7 @@ services: dev-rollup: image: ghcr.io/espressosystems/espresso-sequencer/dev-rollup:main - command: - dev-rollup register --ns 1; dev-rollup register --ns 2; dev-rollup register --ns 3 + command: dev-rollup register --ns 1; dev-rollup register --ns 2; dev-rollup register --ns 3 environment: - ESPRESSO_MARKETPLACE_SOLVER_API_URL=http://marketplace-solver:$ESPRESSO_MARKETPLACE_SOLVER_API_PORT depends_on: diff --git a/justfile b/justfile index 5d7521a925..da0955a597 100644 --- a/justfile +++ b/justfile @@ -10,13 +10,21 @@ demo *args: demo-native *args: build scripts/demo-native {{args}} -build: +lint: + #!/usr/bin/env bash + set -euxo pipefail + # Use the same target dir for both `clippy` invocations + export CARGO_TARGET_DIR=${CARGO_TARGET_DIR:-target} + cargo clippy --workspace --features testing --all-targets -- -D warnings + cargo clippy --workspace --all-targets --manifest-path sequencer-sqlite/Cargo.toml -- -D warnings + +build profile="test": #!/usr/bin/env bash set -euxo pipefail # Use the same target dir for both `build` invocations export CARGO_TARGET_DIR=${CARGO_TARGET_DIR:-target} - cargo build --profile test - cargo build --profile test --manifest-path ./sequencer-sqlite/Cargo.toml + cargo build --profile {{profile}} + cargo build --profile {{profile}} --manifest-path ./sequencer-sqlite/Cargo.toml demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} @@ -93,7 +101,7 @@ build-docker-images: scripts/build-docker-images-native # generate rust bindings for contracts -REGEXP := "^LightClient$|^LightClientStateUpdateVK$|^FeeContract$|PlonkVerifier$|^ERC1967Proxy$|^LightClientMock$|^LightClientStateUpdateVKMock$|^PlonkVerifier2$" +REGEXP := "^LightClient$|^LightClientStateUpdateVK$|^FeeContract$|PlonkVerifier$|^ERC1967Proxy$|^LightClientMock$|^LightClientStateUpdateVKMock$|^PlonkVerifier2$|^PermissionedStakeTable$" gen-bindings: forge bind --contracts ./contracts/src/ --crate-name contract-bindings --bindings-path contract-bindings --select "{{REGEXP}}" --overwrite --force diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs index 46a2005f59..f0daf5e318 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -30,6 +30,7 @@ use hotshot_types::{ data::{fake_commitment, Leaf, ViewNumber}, traits::{ block_contents::{vid_commitment, Transaction as _, GENESIS_VID_NUM_STORAGE_NODES}, + metrics::NoMetrics, node_implementation::{ConsensusTime, NodeType, Versions}, EncodeBytes, }, @@ -74,6 +75,7 @@ pub async fn build_instance_state( Arc::new(StatePeers::::from_urls( state_peers, Default::default(), + &NoMetrics, )), V::Base::version(), ); diff --git a/marketplace-solver/src/state.rs b/marketplace-solver/src/state.rs index efd06fe690..697a273539 100644 --- a/marketplace-solver/src/state.rs +++ b/marketplace-solver/src/state.rs @@ -203,7 +203,7 @@ impl UpdateSolverState for GlobalState { registration.body.active = active; } - // The given signature key should also be from the database `signature_keys`.` + // The given signature key should also be from the database `signature_keys`. 
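Note on the compose changes above: the new `ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL` variable is threaded into every service that talks to the L1, and its value is a human-readable duration string (e.g. `100ms` or `1s`). As a rough illustration of turning such a string into a `std::time::Duration`, here is a minimal sketch that assumes the `humantime` crate; the sequencer's own CLI/env parsing may use a different parser, and the fallback value below is purely hypothetical.

```rust
use std::time::Duration;

/// Illustrative only: parse a human-readable interval such as "100ms" or "3s".
/// Assumes the `humantime` crate; the sequencer's actual argument parsing may differ.
fn l1_polling_interval() -> Duration {
    let raw = std::env::var("ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL")
        .unwrap_or_else(|_| "1s".to_string()); // hypothetical default, not the real one
    humantime::parse_duration(&raw).expect("invalid L1 polling interval")
}

fn main() {
    println!("polling L1 every {:?}", l1_polling_interval());
}
```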
if !registration.body.signature_keys.contains(&signature_key) { return Err(SolverError::SignatureKeysMismatch( signature_key.to_string(), diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index c052601fe7..eb05938302 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -158,7 +158,7 @@ impl HotShotEventProcessingTask { let public_api_url = roll_call_info.public_api_url; - // Send the the discovered public url to the sink + // Send the discovered public url to the sink let send_result = url_sender.send(public_api_url).await; if let Err(err) = send_result { tracing::error!("url sender closed: {}", err); diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index d0a08ebaa6..90d1867474 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -452,7 +452,7 @@ impl HotshotQueryServiceLeafStreamRetriever { /// will use the given base [Url] to be able to retrieve the stream of /// [Leaf]s from the Hotshot Query Service. /// - /// The [Url] is expected to point to the the API version root of the + /// The [Url] is expected to point to the API version root of the /// Hotshot Query Service. Example: /// https://example.com/v0 pub fn new(base_url: Url) -> Self { diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index 2c66e44837..ac4c0fea46 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1003,7 +1003,7 @@ impl ProcessDistributeBlockDetailHandlingTask { } /// [process_distribute_block_detail_handling_stream] is a function that - /// processes the the [Stream] of incoming [BlockDetail] and distributes them + /// processes the [Stream] of incoming [BlockDetail] and distributes them /// to all subscribed clients. async fn process_distribute_block_detail_handling_stream( client_thread_state: Arc>>, @@ -1073,7 +1073,7 @@ impl ProcessDistributeNodeIdentityHandlingTask { } /// [process_distribute_node_identity_handling_stream] is a function that - /// processes the the [Stream] of incoming [NodeIdentity] and distributes them + /// processes the [Stream] of incoming [NodeIdentity] and distributes them /// to all subscribed clients. async fn process_distribute_node_identity_handling_stream( client_thread_state: Arc>>, @@ -1143,7 +1143,7 @@ impl ProcessDistributeVotersHandlingTask { } /// [process_distribute_voters_handling_stream] is a function that processes - /// the the [Stream] of incoming [BitVec] and distributes them to all + /// the [Stream] of incoming [BitVec] and distributes them to all /// subscribed clients. async fn process_distribute_voters_handling_stream( client_thread_state: Arc>>, diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index 5aeea99070..96599607a6 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -247,7 +247,7 @@ where // Where's the stake table? let signatures = signatures.as_ref(); - // Let's determine the the participants of the voter participants + // Let's determine the participants of the voter participants // in the Quorum Certificate. 
// We shouldn't ever have a BitVec that is empty, with the possible diff --git a/process-compose.yaml b/process-compose.yaml index ba66f74176..5ce3c552c7 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -7,6 +7,7 @@ environment: - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo.toml - ESPRESSO_BUILDER_GENESIS_FILE=data/genesis/demo.toml + - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=data/initial_stake_table.toml - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/ - NODE_VALIDATOR_URI=ws://localhost:$ESPRESSO_NODE_VALIDATOR_PORT/v0/ @@ -18,18 +19,16 @@ processes: readiness_probe: exec: command: "[ $(docker inspect -f '{{.State.Health.Status}}' espresso-sequencer-demo-l1-network-1) = 'healthy' ]" - initial_delay_seconds: 5 - period_seconds: 6 + period_seconds: 1 timeout_seconds: 5 - success_threshold: 1 - failure_threshold: 20 + failure_threshold: 30 deploy-sequencer-contracts: # The contract addresses are implicitly inherited from .env. We need to unset these or else the # script will think they're already deployed. command: unset ESPRESSO_SEQUENCER_HOTSHOT_ADDRESS ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS && deploy --only - fee-contract + fee-contract,permissioned-stake-table namespace: setup depends_on: demo-l1-network: @@ -56,6 +55,7 @@ processes: namespace: setup environment: - L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT + - L1_POLLING_INTERVAL=$ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC @@ -89,6 +89,8 @@ processes: state-relay-server: command: state-relay-server readiness_probe: + failure_threshold: 10 + period_seconds: 1 http_get: scheme: http host: localhost @@ -163,6 +165,7 @@ processes: host: localhost port: $ESPRESSO_SEQUENCER_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 sequencer1: @@ -215,6 +218,7 @@ processes: host: localhost port: $ESPRESSO_SEQUENCER1_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 availability: restart: exit_on_failure @@ -262,6 +266,7 @@ processes: port: $ESPRESSO_SEQUENCER2_API_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true restart: exit_on_failure @@ -310,6 +315,7 @@ processes: port: $ESPRESSO_SEQUENCER3_API_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true restart: exit_on_failure @@ -356,6 +362,7 @@ processes: port: $ESPRESSO_SEQUENCER4_API_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true restart: exit_on_failure @@ -387,9 +394,9 @@ processes: host: localhost port: $ESPRESSO_SEQUENCER1_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 - # We use KeyDB (a Redis variant) to maintain consistency between # different parts of the CDN # Cheating a bit here too, but KeyDB is not available as a Nix package. 
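Note: `process-compose.yaml` now points `ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH` at the `data/initial_stake_table.toml` file added in this diff. A minimal sketch of reading that file shape with `serde` + `toml` follows; the struct names are hypothetical and simply mirror the fields in the TOML (`stake_table_key`, `state_ver_key`, `da`, `stake`), whereas the sequencer presumably deserializes into its own `PeerConfigKeys`-style types with typed keys.

```rust
use serde::Deserialize;

/// Hypothetical mirror of one `[[public_keys]]` entry in data/initial_stake_table.toml.
/// The real loader likely parses the tagged-base64 strings into typed key types.
#[derive(Debug, Deserialize)]
struct StakeTableEntry {
    stake_table_key: String, // "BLS_VER_KEY~..." tagged-base64 string
    state_ver_key: String,   // "SCHNORR_VER_KEY~..." tagged-base64 string
    da: bool,
    stake: u64,
}

#[derive(Debug, Deserialize)]
struct InitialStakeTable {
    public_keys: Vec<StakeTableEntry>,
}

fn main() -> anyhow::Result<()> {
    let raw = std::fs::read_to_string("data/initial_stake_table.toml")?;
    let table: InitialStakeTable = toml::from_str(&raw)?;
    for node in &table.public_keys {
        println!("da = {}, stake = {}, key = {}", node.da, node.stake, node.stake_table_key);
    }
    Ok(())
}
```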
@@ -416,6 +423,7 @@ processes: host: 127.0.0.1 port: 9093 path: /metrics + period_seconds: 1 failure_threshold: 100 # A broker is the main message-routing unit of the CDN @@ -430,6 +438,7 @@ processes: host: 127.0.0.1 port: 9091 path: /metrics + period_seconds: 1 failure_threshold: 100 # A broker is the main message-routing unit of the CDN @@ -448,6 +457,7 @@ processes: host: 127.0.0.1 port: 9092 path: /metrics + period_seconds: 1 failure_threshold: 100 cdn-whitelist: @@ -477,6 +487,7 @@ processes: port: $ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true @@ -497,6 +508,7 @@ processes: port: $ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true @@ -520,6 +532,7 @@ processes: port: $ESPRESSO_BUILDER_SERVER_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: restart: "exit_on_failure" @@ -560,6 +573,7 @@ processes: host: localhost port: $ESPRESSO_MARKETPLACE_SOLVER_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 sequencer-db-0: @@ -623,4 +637,3 @@ processes: depends_on: sequencer1: condition: process_healthy - diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index ad03aad196..0779e290f8 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "Inflector" @@ -2752,15 +2752,17 @@ dependencies = [ "committable", "contract-bindings", "derive_more 1.0.0", - "dyn-clone", + "diff-test-bn254", "ethers", "fluent-asserter", "futures", "hotshot", + "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", "itertools 0.12.1", "jf-merkle-tree", + "jf-signature 0.2.0", "jf-utils", "jf-vid", "lru 0.12.5", @@ -3960,11 +3962,14 @@ version = "0.1.0" dependencies = [ "anyhow", "ark-bn254", + "ark-ec", + "ark-ed-on-bn254", "ark-ff", "ark-poly", "ark-serialize", "ark-std", "contract-bindings", + "derive_more 1.0.0", "diff-test-bn254", "ethers", "hotshot-types", @@ -3974,6 +3979,7 @@ dependencies = [ "libp2p", "num-bigint", "num-traits", + "serde", ] [[package]] @@ -4083,7 +4089,7 @@ dependencies = [ [[package]] name = "hotshot-query-service" version = "0.1.75" -source = "git+https://github.com/EspressoSystems/hotshot-query-service?branch=hotshot/0.5.82#5e2c984d19da3826f4cc8d80c5cf1a84dcd377f7" +source = "git+https://github.com/EspressoSystems/hotshot-query-service?tag=v0.1.75#dffefa160f441a663723a67bc54efedb11a88b02" dependencies = [ "anyhow", "ark-serialize", @@ -7139,6 +7145,17 @@ dependencies = [ "uint", ] +[[package]] +name = "priority-queue" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714c75db297bc88a63783ffc6ab9f830698a6705aa0201416931759ef4c8183d" +dependencies = [ + "autocfg", + "equivalent", + "indexmap 2.7.0", +] + [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -8319,7 +8336,7 @@ dependencies = [ "anyhow", "ark-ff", "ark-serialize", - "async-broadcast", + "async-channel 2.3.1", "async-lock 3.4.0", "async-once-cell", "async-trait", @@ -8334,7 +8351,6 @@ dependencies = [ "derivative", "derive_more 1.0.0", "dotenvy", - "dyn-clone", "espresso-types", "ethers", "futures", @@ -8359,6 +8375,7 @@ dependencies = [ "num_enum", "parking_lot", "portpicker", + "priority-queue", "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", 
@@ -8409,6 +8426,7 @@ dependencies = [ "futures", "hotshot", "hotshot-contract-adapter", + "hotshot-types", "log-panics", "portpicker", "reqwest 0.11.27", @@ -8417,6 +8435,7 @@ dependencies = [ "surf", "tempfile", "tokio", + "toml 0.8.19", "tracing", "url", ] diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 5fb19d8f63..d05d749208 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -43,7 +43,7 @@ vergen = { workspace = true } anyhow = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true, features = ["derive"] } -async-broadcast = { workspace = true } +async-channel = { workspace = true } async-lock = { workspace = true } async-once-cell = { workspace = true } async-trait = { workspace = true } @@ -62,7 +62,6 @@ csv = "1" derivative = "2.2" derive_more = { workspace = true } dotenvy = { workspace = true } -dyn-clone = { workspace = true } espresso-types = { path = "../types" } ethers = { workspace = true } futures = { workspace = true } @@ -95,6 +94,7 @@ marketplace-solver = { path = "../marketplace-solver" } num_enum = "0.7" parking_lot = "0.12" portpicker = { workspace = true } +priority-queue = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rand_distr = { workspace = true } diff --git a/sequencer/api/migrations/postgres/V401__archive_provider.sql b/sequencer/api/migrations/postgres/V401__archive_provider.sql new file mode 100644 index 0000000000..2c733d5b96 --- /dev/null +++ b/sequencer/api/migrations/postgres/V401__archive_provider.sql @@ -0,0 +1,21 @@ +-- Add information needed for consensus storage to act as a provider for archive recovery. + +-- Add payload hash to DA proposal, since the query service requests missing payloads by hash. +ALTER TABLE da_proposal + ADD COLUMN payload_hash VARCHAR; +CREATE INDEX da_proposal_payload_hash_idx ON da_proposal (payload_hash); + +-- Add payload hash to VID share, since the query service requests missing VID common by payload +-- hash. +ALTER TABLE vid_share + ADD COLUMN payload_hash VARCHAR; +CREATE INDEX vid_share_payload_hash_idx ON vid_share (payload_hash); + +-- Add QC storage, since the query service requires missing leaves to be fetched alongside a QC with +-- that leaf hash. +CREATE TABLE quorum_certificate ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR NOT NULL, + data BYTEA NOT NULL +); +CREATE INDEX quorum_certificate_leaf_hash_idx ON quorum_certificate (leaf_hash); diff --git a/sequencer/api/migrations/sqlite/V201__archive_provider.sql b/sequencer/api/migrations/sqlite/V201__archive_provider.sql new file mode 100644 index 0000000000..1c1779d320 --- /dev/null +++ b/sequencer/api/migrations/sqlite/V201__archive_provider.sql @@ -0,0 +1,21 @@ +-- Add information needed for consensus storage to act as a provider for archive recovery. + +-- Add payload hash to DA proposal, since the query service requests missing payloads by hash. +ALTER TABLE da_proposal + ADD COLUMN payload_hash VARCHAR; +CREATE INDEX da_proposal_payload_hash_idx ON da_proposal (payload_hash); + +-- Add payload hash to VID share, since the query service requests missing VID common by payload +-- hash. +ALTER TABLE vid_share + ADD COLUMN payload_hash VARCHAR; +CREATE INDEX vid_share_payload_hash_idx ON vid_share (payload_hash); + +-- Add QC storage, since the query service requires missing leaves to be fetched alongside a QC with +-- that leaf hash. 
+CREATE TABLE quorum_certificate ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR NOT NULL, + data BLOB NOT NULL +); +CREATE INDEX quorum_certificate_leaf_hash_idx ON quorum_certificate (leaf_hash); diff --git a/sequencer/api/public-env-vars.toml b/sequencer/api/public-env-vars.toml index 9c9d203467..c58af6c35c 100644 --- a/sequencer/api/public-env-vars.toml +++ b/sequencer/api/public-env-vars.toml @@ -60,6 +60,9 @@ variables = [ "ESPRESSO_SEQUENCER_CATCHUP_MAX_RETRY_DELAY", "ESPRESSO_SEQUENCER_CDN_ENDPOINT", "ESPRESSO_SEQUENCER_CHUNK_FETCH_DELAY", + "ESPRESSO_SEQUENCER_CONSENSUS_STORAGE_MINIMUM_RETENTION", + "ESPRESSO_SEQUENCER_CONSENSUS_STORAGE_TARGET_RETENTION", + "ESPRESSO_SEQUENCER_CONSENSUS_STORAGE_TARGET_USAGE", "ESPRESSO_SEQUENCER_FETCH_RATE_LIMIT", "ESPRESSO_SEQUENCER_HOTSHOT_ADDRESS", "ESPRESSO_SEQUENCER_HOTSHOT_EVENT_STREAMING_API_PORT", diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 12f8191e2a..21b1fe343a 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1066,10 +1066,15 @@ mod api_tests { }; use hotshot_types::drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}; use hotshot_types::{ - data::QuorumProposal2, event::LeafInfo, simple_certificate::QuorumCertificate, - traits::node_implementation::ConsensusTime, + data::{DaProposal, QuorumProposal2, VidDisperseShare}, + event::LeafInfo, + message::Proposal, + simple_certificate::QuorumCertificate, + traits::{node_implementation::ConsensusTime, signature_key::SignatureKey, EncodeBytes}, + vid::vid_scheme, }; + use jf_vid::VidScheme; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use std::fmt::Debug; @@ -1226,6 +1231,7 @@ mod api_tests { } setup_test(); + let (pubkey, privkey) = PubKey::generated_from_seed_indexed([0; 32], 1); let storage = D::create_storage().await; let persistence = D::persistence_options(&storage).create().await.unwrap(); @@ -1240,11 +1246,13 @@ mod api_tests { // Create two non-consecutive leaf chains. let mut chain1 = vec![]; + let genesis = Leaf::genesis(&Default::default(), &NodeState::mock()).await; + let payload = genesis.block_payload().unwrap(); + let payload_bytes_arc = payload.encode(); + let disperse = vid_scheme(2).disperse(payload_bytes_arc.clone()).unwrap(); + let payload_commitment = disperse.commit; let mut quorum_proposal = QuorumProposal2:: { - block_header: Leaf::genesis(&Default::default(), &NodeState::mock()) - .await - .block_header() - .clone(), + block_header: genesis.block_header().clone(), view_number: ViewNumber::genesis(), justify_qc: QuorumCertificate::genesis::( &ValidatedState::default(), @@ -1274,6 +1282,50 @@ mod api_tests { qc.data.leaf_commit = Committable::commit(&leaf); justify_qc = qc.clone(); chain1.push((leaf.clone(), qc.clone())); + + // Include a quorum proposal for each leaf. + let quorum_proposal_signature = + PubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum_proposal"); + persistence + .append_quorum_proposal(&Proposal { + data: quorum_proposal.clone(), + signature: quorum_proposal_signature, + _pd: Default::default(), + }) + .await + .unwrap(); + + // Include VID information for each leaf. + let share = VidDisperseShare:: { + view_number: leaf.view_number(), + payload_commitment, + share: disperse.shares[0].clone(), + common: disperse.common.clone(), + recipient_key: pubkey, + }; + persistence + .append_vid(&share.to_proposal(&privkey).unwrap()) + .await + .unwrap(); + + // Include payload information for each leaf. 
+ let block_payload_signature = + PubKey::sign(&privkey, &payload_bytes_arc).expect("Failed to sign block payload"); + let da_proposal_inner = DaProposal:: { + encoded_transactions: payload_bytes_arc.clone(), + metadata: payload.ns_table().clone(), + view_number: leaf.view_number(), + }; + let da_proposal = Proposal { + data: da_proposal_inner, + signature: block_payload_signature, + _pd: Default::default(), + }; + persistence + .append_da(&da_proposal, payload_commitment) + .await + .unwrap(); } // Split into two chains. let mut chain2 = chain1.split_off(2); @@ -1312,7 +1364,8 @@ mod api_tests { .await .unwrap(); - // Check that the leaves were moved to archive storage. + // Check that the leaves were moved to archive storage, along with payload and VID + // information. for (leaf, qc) in chain1.iter().chain(&chain2) { tracing::info!(height = leaf.height(), "check archive"); let qd = data_source.get_leaf(leaf.height() as usize).await.await; @@ -1320,7 +1373,128 @@ mod api_tests { let stored_qc = qd.qc().clone().to_qc2(); assert_eq!(&stored_leaf, leaf); assert_eq!(&stored_qc, qc); + + data_source + .get_block(leaf.height() as usize) + .await + .try_resolve() + .ok() + .unwrap(); + data_source + .get_vid_common(leaf.height() as usize) + .await + .try_resolve() + .ok() + .unwrap(); + + // Check that all data has been garbage collected for the decided views. + assert!(persistence + .load_da_proposal(leaf.view_number()) + .await + .unwrap() + .is_none()); + assert!(persistence + .load_vid_share(leaf.view_number()) + .await + .unwrap() + .is_none()); + assert!(persistence + .load_quorum_proposal(leaf.view_number()) + .await + .is_err()); } + + // Check that data has _not_ been garbage collected for the missing view. + assert!(persistence + .load_da_proposal(ViewNumber::new(2)) + .await + .unwrap() + .is_some()); + assert!(persistence + .load_vid_share(ViewNumber::new(2)) + .await + .unwrap() + .is_some()); + persistence + .load_quorum_proposal(ViewNumber::new(2)) + .await + .unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + pub async fn test_decide_missing_data() + where + D: TestableSequencerDataSource + Debug + 'static, + { + setup_test(); + + let storage = D::create_storage().await; + let persistence = D::persistence_options(&storage).create().await.unwrap(); + let data_source: Arc> = + Arc::new(StorageState::new( + D::create(D::persistence_options(&storage), Default::default(), false) + .await + .unwrap(), + ApiState::new(future::pending()), + )); + let consumer = ApiEventConsumer::from(data_source.clone()); + + let mut qc = QuorumCertificate::genesis::( + &ValidatedState::default(), + &NodeState::mock(), + ) + .await + .to_qc2(); + let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + + // Append the genesis leaf. We don't use this for the test, because the update function will + // automatically fill in the missing data for genesis. We just append this to get into a + // consistent state to then append the leaf from view 1, which will have missing data. + tracing::info!(?leaf, ?qc, "decide genesis leaf"); + persistence + .append_decided_leaves( + leaf.view_number(), + [(&leaf_info(leaf.clone().into()), qc.clone())], + &consumer, + ) + .await + .unwrap(); + + // Create another leaf, with missing data. 
+ let mut block_header = leaf.block_header().clone(); + *block_header.height_mut() += 1; + let qp = QuorumProposal2 { + block_header, + view_number: leaf.view_number() + 1, + justify_qc: qc.clone(), + upgrade_certificate: None, + view_change_evidence: None, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }; + + let leaf = Leaf2::from_quorum_proposal(&qp); + qc.view_number = leaf.view_number(); + qc.data.leaf_commit = Committable::commit(&leaf); + + // Decide a leaf without the corresponding payload or VID. + tracing::info!(?leaf, ?qc, "append leaf 1"); + persistence + .append_decided_leaves( + leaf.view_number(), + [(&leaf_info(leaf.clone()), qc)], + &consumer, + ) + .await + .unwrap(); + + // Check that we still processed the leaf. + assert_eq!( + leaf, + data_source.get_leaf(1).await.await.leaf().clone().into() + ); + assert!(data_source.get_vid_common(1).await.is_pending()); + assert!(data_source.get_block(1).await.is_pending()); } fn leaf_info(leaf: Leaf2) -> LeafInfo { @@ -1527,6 +1701,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .build(); @@ -1571,6 +1746,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ), &NoMetrics, test_helpers::STAKE_TABLE_CAPACITY_FOR_TEST, @@ -1636,6 +1812,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config(TestConfigBuilder::default().l1_url(l1).build()) @@ -1713,6 +1890,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config(TestConfigBuilder::default().l1_url(l1).build()) @@ -1773,6 +1951,7 @@ mod test { StatePeers::::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], BackoffParams::default(), + &NoMetrics, ) }); @@ -1780,6 +1959,7 @@ mod test { peers[2] = StatePeers::::from_urls( vec![url.clone()], BackoffParams::default(), + &NoMetrics, ); let config = TestNetworkConfigBuilder::::with_num_nodes() @@ -1801,13 +1981,16 @@ mod test { // The catchup should successfully retrieve the correct chain config. let node = &network.peers[0]; let peers = node.node_state().peers; - peers.try_fetch_chain_config(cf.commit()).await.unwrap(); + peers.try_fetch_chain_config(0, cf.commit()).await.unwrap(); // Test a catchup request for node #1, which is connected to a dishonest peer. // This request will result in an error due to the malicious chain config provided by the peer. 
let node = &network.peers[1]; let peers = node.node_state().peers; - peers.try_fetch_chain_config(cf.commit()).await.unwrap_err(); + peers + .try_fetch_chain_config(0, cf.commit()) + .await + .unwrap_err(); network.server.shut_down().await; handle.abort(); @@ -1963,6 +2146,7 @@ mod test { StatePeers::::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config( @@ -2136,6 +2320,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config(TestConfigBuilder::default().l1_url(l1).build()) @@ -2200,6 +2385,7 @@ mod test { let peers = StatePeers::>::from_urls( vec!["https://notarealnode.network".parse().unwrap(), url], Default::default(), + &NoMetrics, ); // Fetch the config from node 1, a different node than the one running the service. diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index e640f6ab07..7d7d01284d 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -3,7 +3,7 @@ use anyhow::{bail, Context}; use clap::Parser; use espresso_types::{ - v0::traits::{EventConsumer, NullEventConsumer, SequencerPersistence}, + v0::traits::{EventConsumer, NullEventConsumer, PersistenceOptions, SequencerPersistence}, BlockMerkleTree, PubKey, }; use futures::{ @@ -13,6 +13,7 @@ use futures::{ use hotshot_events_service::events::Error as EventStreamingError; use hotshot_query_service::{ data_source::{ExtensibleDataSource, MetricsDataSource}, + fetching::provider::QueryServiceProvider, status::{self, UpdateStatusData}, ApiState as AppState, Error, }; @@ -27,7 +28,7 @@ use vbs::version::StaticVersionType; use super::{ data_source::{ - provider, CatchupDataSource, HotShotConfigDataSource, NodeStateDataSource, + provider, CatchupDataSource, HotShotConfigDataSource, NodeStateDataSource, Provider, SequencerDataSource, StateSignatureDataSource, SubmitDataSource, }, endpoints, fs, sql, @@ -333,12 +334,18 @@ impl Options { N: ConnectedNetwork, P: SequencerPersistence, { - let ds = sql::DataSource::create( - mod_opt.clone(), - provider::(query_opt.peers.clone(), bind_version), - false, - ) - .await?; + let mut provider = Provider::default(); + + // Use the database itself as a fetching provider: sometimes we can fetch data that is + // missing from the query service from ephemeral consensus storage. + provider = provider.with_provider(mod_opt.clone().create().await?); + // If that fails, fetch missing data from peers. 
+ for peer in query_opt.peers { + tracing::info!("will fetch missing data from {peer}"); + provider = provider.with_provider(QueryServiceProvider::new(peer, bind_version)); + } + + let ds = sql::DataSource::create(mod_opt.clone(), provider, false).await?; let (metrics, ds, mut app) = self .init_app_modules(ds, state.clone(), bind_version) .await?; diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index f08a55a790..c265f89743 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -53,7 +53,7 @@ impl SequencerDataSource for DataSource { let fetch_limit = opt.fetch_rate_limit; let active_fetch_delay = opt.active_fetch_delay; let chunk_fetch_delay = opt.chunk_fetch_delay; - let mut cfg = Config::try_from(opt)?; + let mut cfg = Config::try_from(&opt)?; if reset { cfg = cfg.reset_schema(); @@ -471,7 +471,9 @@ mod impl_testable_data_source { #[cfg(feature = "embedded-db")] { - let opt = crate::persistence::sql::SqliteOptions { path: db.path() }; + let opt = crate::persistence::sql::SqliteOptions { + path: Some(db.path()), + }; opt.into() } } diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 6a87bfc9c9..afd6e96f7b 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -1,6 +1,7 @@ -use std::{fs::File, io::stdout, path::PathBuf}; +use std::{fs::File, io::stdout, path::PathBuf, time::Duration}; use clap::Parser; +use espresso_types::parse_duration; use ethers::types::Address; use futures::FutureExt; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; @@ -8,6 +9,7 @@ use hotshot_state_prover::service::light_client_genesis; use sequencer_utils::{ deployer::{deploy, ContractGroup, Contracts, DeployedContracts}, logging, + stake_table::PermissionedStakeTableConfig, }; use url::Url; @@ -38,6 +40,15 @@ struct Options { )] rpc_url: Url, + /// Request rate when polling L1. + #[clap( + long, + env = "ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration, + )] + pub l1_polling_interval: Duration, + /// URL of a sequencer node that is currently providing the HotShot config. /// This is used to initialize the stake table. #[clap( @@ -111,6 +122,20 @@ struct Options { #[clap(long, env = "ESPRESSO_SEQUENCER_PERMISSIONED_PROVER")] permissioned_prover: Option
<Address>

, + /// A toml file with the initial stake table. + /// + /// Schema: + /// + /// public_keys = [ + /// { + /// stake_table_key = "BLS_VER_KEY~...", + /// state_ver_key = "SCHNORR_VER_KEY~...", + /// da = true, + /// }, + /// ] + #[clap(long, env = "ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH")] + initial_stake_table_path: Option, + #[clap(flatten)] logging: logging::Config, } @@ -126,8 +151,15 @@ async fn main() -> anyhow::Result<()> { let genesis = light_client_genesis(&sequencer_url, opt.stake_table_capacity).boxed(); + let initial_stake_table = if let Some(path) = opt.initial_stake_table_path { + Some(PermissionedStakeTableConfig::from_toml_file(&path)?.into()) + } else { + None + }; + let contracts = deploy( opt.rpc_url, + opt.l1_polling_interval, opt.mnemonic, opt.account_index, opt.multisig_address, @@ -136,6 +168,7 @@ async fn main() -> anyhow::Result<()> { genesis, opt.permissioned_prover, contracts, + initial_stake_table, ) .await?; diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 18e8ecfa40..2ae2c02632 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -2,7 +2,7 @@ use anyhow::{bail, ensure, Context}; use clap::{Parser, Subcommand}; use client::SequencerClient; use contract_bindings::fee_contract::FeeContract; -use espresso_types::{eth_signature_key::EthKeyPair, Header}; +use espresso_types::{eth_signature_key::EthKeyPair, parse_duration, Header}; use ethers::{ middleware::{Middleware, SignerMiddleware}, providers::Provider, @@ -10,7 +10,7 @@ use ethers::{ }; use futures::stream::StreamExt; use sequencer_utils::logging; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use surf_disco::Url; /// Command-line utility for working with the Espresso bridge. @@ -37,6 +37,16 @@ struct Deposit { #[clap(short, long, env = "L1_PROVIDER")] rpc_url: Url, + /// Request rate when polling L1. + #[clap( + short, + long, + env = "L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration + )] + l1_interval: Duration, + /// Espresso query service provider. /// /// This must point to an Espresso node running the /availability, /node and Merklized state @@ -106,6 +116,16 @@ struct L1Balance { #[clap(short, long, env = "L1_PROVIDER")] rpc_url: Url, + /// Request rate when polling L1. + #[clap( + short, + long, + env = "L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration + )] + l1_interval: Duration, + /// Account to check. #[clap(short, long, env = "ADDRESS", required_unless_present = "mnemonic")] address: Option
<Address>
, @@ -134,7 +154,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { let key_pair = EthKeyPair::from_mnemonic(opt.mnemonic, opt.account_index)?; // Connect to L1. - let rpc = Provider::try_from(opt.rpc_url.to_string())?; + let rpc = Provider::try_from(opt.rpc_url.to_string())?.interval(opt.l1_interval); let signer = key_pair.signer(); let l1 = Arc::new(SignerMiddleware::new_with_provider_chain(rpc, signer).await?); let contract = FeeContract::new(opt.contract_address, l1.clone()); @@ -198,7 +218,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { let Some(l1_finalized) = header.l1_finalized() else { continue; }; - if l1_finalized.number >= l1_block { + if l1_finalized.number() >= l1_block { tracing::info!(block = header.height(), "deposit finalized on Espresso"); break header.height(); } else { @@ -258,7 +278,7 @@ async fn l1_balance(opt: L1Balance) -> anyhow::Result<()> { bail!("address or mnemonic must be provided"); }; - let l1 = Provider::try_from(opt.rpc_url.to_string())?; + let l1 = Provider::try_from(opt.rpc_url.to_string())?.interval(opt.l1_interval); let block = opt.block.map(BlockId::from); tracing::debug!(%address, ?block, "fetching L1 balance"); diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index b7cd9b7fe8..dcd4adb588 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -42,6 +42,17 @@ struct Args { /// If this is not provided, an Avil node will be launched automatically. #[clap(short, long, env = "ESPRESSO_SEQUENCER_L1_PROVIDER")] rpc_url: Option, + + /// Request rate when polling L1. + #[clap( + short, + long, + env = "ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration + )] + l1_interval: Duration, + /// Mnemonic for an L1 wallet. 
/// /// This wallet is used to deploy the contracts, @@ -165,6 +176,7 @@ async fn main() -> anyhow::Result<()> { retry_interval, alt_prover_retry_intervals, alt_prover_update_intervals, + l1_interval, } = cli_params; logging.init(); @@ -262,6 +274,7 @@ async fn main() -> anyhow::Result<()> { let contracts = deploy( url.clone(), + l1_interval, mnemonic.clone(), account_index, multisig_address, @@ -270,10 +283,13 @@ async fn main() -> anyhow::Result<()> { async { Ok(lc_genesis.clone()) }.boxed(), None, contracts.clone(), + None, // initial stake table ) .await?; - let provider = Provider::::try_from(url.as_str()).unwrap(); + let provider = Provider::::try_from(url.as_str()) + .unwrap() + .interval(l1_interval); let chain_id = provider.get_chainid().await.unwrap().as_u64(); let wallet = MnemonicBuilder::::default() diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index ccfe74322c..8c8ca7a667 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use anyhow::{bail, Context}; +use anyhow::{anyhow, bail, ensure, Context}; +use async_lock::RwLock; use async_trait::async_trait; use committable::Commitment; use committable::Committable; @@ -9,17 +10,24 @@ use espresso_types::{ v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, }; -use futures::future::{Future, FutureExt}; +use futures::future::{Future, FutureExt, TryFuture, TryFutureExt}; use hotshot_types::{ - data::ViewNumber, network::NetworkConfig, traits::node_implementation::ConsensusTime as _, + data::ViewNumber, + network::NetworkConfig, + traits::{ + metrics::{Counter, CounterFamily, Metrics}, + node_implementation::ConsensusTime as _, + }, ValidatorConfig, }; use itertools::Itertools; use jf_merkle_tree::{prelude::MerkleNode, ForgetableMerkleTreeScheme, MerkleTreeScheme}; +use priority_queue::PriorityQueue; use serde::de::DeserializeOwned; -use std::collections::HashMap; +use std::{cmp::Ordering, collections::HashMap, fmt::Display, time::Duration}; use surf_disco::Request; use tide_disco::error::ServerError; +use tokio::time::timeout; use url::Url; use vbs::version::StaticVersionType; @@ -34,12 +42,20 @@ use crate::{ struct Client { inner: surf_disco::Client, url: Url, + requests: Arc>, + failures: Arc>, } impl Client { - pub fn new(url: Url) -> Self { + pub fn new( + url: Url, + requests: &(impl CounterFamily + ?Sized), + failures: &(impl CounterFamily + ?Sized), + ) -> Self { Self { inner: surf_disco::Client::new(url.clone()), + requests: Arc::new(requests.create(vec![url.to_string()])), + failures: Arc::new(failures.create(vec![url.to_string()])), url, } } @@ -64,49 +80,165 @@ pub(crate) async fn local_and_remote( } } +/// A score of a catchup peer, based on our interactions with that peer. +/// +/// The score accounts for malicious peers -- i.e. peers that gave us an invalid response to a +/// verifiable request -- and faulty/unreliable peers -- those that fail to respond to requests at +/// all. The score has a comparison function where higher is better, or in other words `p1 > p2` +/// means we believe we are more likely to successfully catch up using `p1` than `p2`. This makes it +/// convenient and efficient to collect peers in a priority queue which we can easily convert to a +/// list sorted by reliability. 
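// A minimal standalone sketch of the cross-multiplication comparison described above.
// `ranks_higher` is a hypothetical helper added for illustration; it is not part of
// this change. Comparing failure *rates* without division keeps the ordering exact
// and well defined even for a peer with zero recorded requests.
fn ranks_higher(a: (usize, usize), b: (usize, usize)) -> bool {
    // Each peer is (requests, failures). `a` ranks higher than `b` iff
    //   a.failures / a.requests < b.failures / b.requests
    // which, cross-multiplied, is
    //   b.failures * a.requests > a.failures * b.requests
    let (a_requests, a_failures) = a;
    let (b_requests, b_failures) = b;
    b_failures * a_requests > a_failures * b_requests
}
// For example, 2 failures out of 1000 requests ranks above 1 failure out of 10,
// since 1 * 1000 > 2 * 10.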
+#[derive(Clone, Copy, Debug, Default)] +struct PeerScore { + requests: usize, + failures: usize, +} + +impl Ord for PeerScore { + fn cmp(&self, other: &Self) -> Ordering { + // Compare failure rates: `self` is better than `other` if + // self.failures / self.requests < other.failures / other.requests + // or equivalently + // other.failures * self.requests > self.failures * other.requests + (other.failures * self.requests).cmp(&(self.failures * other.requests)) + } +} + +impl PartialOrd for PeerScore { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for PeerScore { + fn eq(&self, other: &Self) -> bool { + self.cmp(other).is_eq() + } +} + +impl Eq for PeerScore {} + #[derive(Debug, Clone, Default)] pub struct StatePeers { + // Peer IDs, ordered by reliability score. Each ID is an index into `clients`. + scores: Arc>>, clients: Vec>, backoff: BackoffParams, } impl StatePeers { - pub fn from_urls(urls: Vec, backoff: BackoffParams) -> Self { + async fn fetch( + &self, + retry: usize, + f: impl Fn(Client) -> Fut, + ) -> anyhow::Result + where + Fut: TryFuture, + { + // Since we have generally have multiple peers we can catch up from, we want a fairly + // aggressive timeout for requests: if a peer is not responding quickly, we're better off + // just trying the next one rather than waiting, and this prevents a malicious peer from + // delaying catchup for a long time. + // + // However, if we set the timeout _too_ aggressively, we might fail to catch up even from an + // honest peer, and thus never make progress. Thus, we start with a timeout of 500ms, which + // is aggressive but still very reasonable for an HTTP request. If that fails with all of + // our peers, we increase the timeout by 1 second for each successive retry, until we + // eventually succeed. + let timeout_dur = Duration::from_millis(500) * (retry as u32 + 1); + + // Keep track of which peers we make requests to and which succeed (`true`) or fail (`false`), + // so we can update reliability scores at the end. + let mut requests = HashMap::new(); + let mut res = Err(anyhow!("failed fetching from every peer")); + + // Try each peer in order of reliability score, until we succeed. We clone out of + // `self.scores` because it is small (contains only numeric IDs and scores), so this clone + // is a lot cheaper than holding the read lock the entire time we are making requests (which + // could be a while). + let mut scores = { (*self.scores.read().await).clone() }; + while let Some((id, score)) = scores.pop() { + let client = &self.clients[id]; + tracing::info!("fetching from {}", client.url); + match timeout(timeout_dur, f(client.clone()).into_future()).await { + Ok(Ok(t)) => { + requests.insert(id, true); + res = Ok(t); + break; + } + Ok(Err(err)) => { + tracing::warn!(id, ?score, peer = %client.url, "error from peer: {err:#}"); + requests.insert(id, false); + } + Err(_) => { + tracing::warn!(id, ?score, peer = %client.url, ?timeout_dur, "request timed out"); + requests.insert(id, false); + } + } + } + + // Update client scores. 
+ let mut scores = self.scores.write().await; + for (id, success) in requests { + scores.change_priority_by(&id, |score| { + score.requests += 1; + self.clients[id].requests.add(1); + if !success { + score.failures += 1; + self.clients[id].failures.add(1); + } + }); + } + + res + } + + pub fn from_urls( + urls: Vec, + backoff: BackoffParams, + metrics: &(impl Metrics + ?Sized), + ) -> Self { if urls.is_empty() { panic!("Cannot create StatePeers with no peers"); } + let metrics = metrics.subgroup("catchup".into()); + let requests = metrics.counter_family("requests".into(), vec!["peer".into()]); + let failures = metrics.counter_family("request_failures".into(), vec!["peer".into()]); + + let scores = urls + .iter() + .enumerate() + .map(|(i, _)| (i, PeerScore::default())) + .collect(); + let clients = urls + .into_iter() + .map(|url| Client::new(url, &*requests, &*failures)) + .collect(); + Self { - clients: urls.into_iter().map(Client::new).collect(), + clients, + scores: Arc::new(RwLock::new(scores)), backoff, } } + #[tracing::instrument(skip(self, my_own_validator_config))] pub async fn fetch_config( &self, my_own_validator_config: ValidatorConfig, ) -> anyhow::Result> { self.backoff() - .retry(self, move |provider| { + .retry(self, move |provider, retry| { let my_own_validator_config = my_own_validator_config.clone(); async move { - for client in &provider.clients { - tracing::info!("fetching config from {}", client.url); - match client - .get::("config/hotshot") - .send() - .await - { - Ok(res) => { - return res.into_network_config(my_own_validator_config) - .context(format!("fetched config from {}, but failed to convert to private config", client.url)); - } - Err(err) => { - tracing::warn!("error fetching config from peer: {err:#}"); - } - } - } - bail!("could not fetch config from any peer"); + let cfg = provider + .fetch(retry, |client| { + client.get::("config/hotshot").send() + }) + .await?; + cfg.into_network_config(my_own_validator_config) + .context("fetched config, but failed to convert to private config") } .boxed() }) @@ -119,115 +251,82 @@ impl StateCatchup for StatePeers { #[tracing::instrument(skip(self, _instance))] async fn try_fetch_accounts( &self, + retry: usize, _instance: &NodeState, height: u64, view: ViewNumber, fee_merkle_tree_root: FeeMerkleCommitment, accounts: &[FeeAccount], ) -> anyhow::Result { - for client in self.clients.iter() { - tracing::info!("Fetching accounts from {}", client.url); - let req = match client + self.fetch(retry, |client| async move { + let snapshot = client .inner - .post::(&format!("catchup/{height}/{}/accounts", view.u64(),)) - .body_binary(&accounts.to_vec()) - { - Ok(req) => req, - Err(err) => { - tracing::warn!("failed to construct accounts catchup request: {err:#}"); - continue; - } - }; - let snapshot = match req.send().await { - Ok(res) => res, - Err(err) => { - tracing::info!(peer = %client.url, "error fetching accounts from peer: {err:#}"); - continue; - } - }; + .post::(&format!("catchup/{height}/{}/accounts", view.u64())) + .body_binary(&accounts.to_vec())? + .send() + .await?; // Verify proofs. 
for account in accounts { - let Some((proof, _)) = FeeAccountProof::prove(&snapshot, (*account).into()) else { - tracing::warn!(peer = %client.url, "response from peer missing account {account}"); - continue; - }; - if let Err(err) = proof.verify(&fee_merkle_tree_root) { - tracing::warn!(peer = %client.url, "peer gave invalid proof for account {account}: {err:#}"); - continue; - } + let (proof, _) = FeeAccountProof::prove(&snapshot, (*account).into()) + .context(format!("response missing account {account}"))?; + proof + .verify(&fee_merkle_tree_root) + .context(format!("invalid proof for accoujnt {account}"))?; } - return Ok(snapshot); - } - bail!("Could not fetch account from any peer"); + anyhow::Ok(snapshot) + }) + .await } #[tracing::instrument(skip(self, _instance, mt))] async fn try_remember_blocks_merkle_tree( &self, + retry: usize, _instance: &NodeState, height: u64, view: ViewNumber, mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { - for client in self.clients.iter() { - tracing::debug!(peer = %client.url, "fetching frontier from peer"); - match client - .get::(&format!("catchup/{height}/{}/blocks", view.u64())) - .send() - .await - { - Ok(frontier) => { - let Some(elem) = frontier.elem() else { - tracing::warn!(peer = %client.url, "Provided frontier is missing leaf element"); - continue; - }; - match mt.remember(mt.num_leaves() - 1, *elem, &frontier) { - Ok(_) => return Ok(()), - Err(err) => { - tracing::warn!(peer = %client.url, "Error verifying block proof: {err:#}"); - continue; - } - } - } - Err(err) => { - tracing::info!(peer = %client.url, "error fetching blocks from peer: {err:#}"); + *mt = self + .fetch(retry, |client| { + let mut mt = mt.clone(); + async move { + let frontier = client + .get::(&format!("catchup/{height}/{}/blocks", view.u64())) + .send() + .await?; + let elem = frontier + .elem() + .context("provided frontier is missing leaf element")?; + mt.remember(mt.num_leaves() - 1, *elem, &frontier) + .context("verifying block proof")?; + anyhow::Ok(mt) } - } - } - bail!("Could not fetch frontier from any peer"); + }) + .await?; + Ok(()) } async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { - for client in self.clients.iter() { - tracing::info!("Fetching chain config from {}", client.url); - match client + self.fetch(retry, |client| async move { + let cf = client .get::(&format!("catchup/chain-config/{}", commitment)) .send() - .await - { - Ok(cf) => { - if cf.commit() == commitment { - return Ok(cf); - } else { - tracing::error!( - "Received chain config with mismatched commitment from {}: expected {}, got {}", - client.url, - commitment, - cf.commit(), - ); - } - } - Err(err) => { - tracing::warn!("Error fetching chain config from peer: {}", err); - } - } - } - bail!("Could not fetch chain config from any peer"); + .await?; + ensure!( + cf.commit() == commitment, + "received chain config with mismatched commitment: expected {commitment}, got {}", + cf.commit() + ); + Ok(cf) + }) + .await } fn backoff(&self) -> &BackoffParams { @@ -358,9 +457,10 @@ where { // TODO: add a test for the account proof validation // issue # 2102 (https://github.com/EspressoSystems/espresso-sequencer/issues/2102) - #[tracing::instrument(skip(self, instance))] + #[tracing::instrument(skip(self, _retry, instance))] async fn try_fetch_accounts( &self, + _retry: usize, instance: &NodeState, block_height: u64, view: ViewNumber, @@ -374,9 +474,10 @@ where .0) } - #[tracing::instrument(skip(self, instance, mt))] + 
#[tracing::instrument(skip(self, _retry, instance, mt))] async fn try_remember_blocks_merkle_tree( &self, + _retry: usize, instance: &NodeState, bh: u64, view: ViewNumber, @@ -401,6 +502,7 @@ where async fn try_fetch_chain_config( &self, + _retry: usize, commitment: Commitment, ) -> anyhow::Result { let cf = self.db.get_chain_config(commitment).await?; @@ -461,6 +563,7 @@ impl NullStateCatchup { impl StateCatchup for NullStateCatchup { async fn try_fetch_accounts( &self, + _retry: usize, _instance: &NodeState, _height: u64, _view: ViewNumber, @@ -472,6 +575,7 @@ impl StateCatchup for NullStateCatchup { async fn try_remember_blocks_merkle_tree( &self, + _retry: usize, _instance: &NodeState, _height: u64, _view: ViewNumber, @@ -482,6 +586,7 @@ impl StateCatchup for NullStateCatchup { async fn try_fetch_chain_config( &self, + _retry: usize, commitment: Commitment, ) -> anyhow::Result { self.chain_configs @@ -498,3 +603,25 @@ impl StateCatchup for NullStateCatchup { "NullStateCatchup".into() } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_peer_priority() { + let good_peer = PeerScore { + requests: 1000, + failures: 2, + }; + let bad_peer = PeerScore { + requests: 10, + failures: 1, + }; + assert!(good_peer > bad_peer); + + let mut peers: PriorityQueue<_, _> = [(0, good_peer), (1, bad_peer)].into_iter().collect(); + assert_eq!(peers.pop(), Some((0, good_peer))); + assert_eq!(peers.pop(), Some((1, bad_peer))); + } +} diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 06e27e3b1c..141ff9e4f9 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -1,13 +1,9 @@ use std::{fmt::Display, sync::Arc}; use anyhow::Context; -use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; -use clap::Parser; -use committable::Commitment; use derivative::Derivative; use espresso_types::{ - parse_duration, v0::traits::{EventConsumer as PersistenceEventConsumer, SequencerPersistence}, NodeState, PubKey, Transaction, ValidatedState, }; @@ -22,32 +18,27 @@ use hotshot::{ }; use hotshot_events_service::events_source::{EventConsumer, EventsStreamer}; use parking_lot::Mutex; -use tokio::{ - spawn, - task::JoinHandle, - time::{sleep, timeout}, -}; +use tokio::{spawn, task::JoinHandle}; use hotshot_orchestrator::client::OrchestratorClient; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{EpochNumber, Leaf2, ViewNumber}, + data::{Leaf2, ViewNumber}, network::NetworkConfig, traits::{ metrics::Metrics, network::ConnectedNetwork, - node_implementation::{ConsensusTime, NodeType, Versions}, - ValidatedState as _, + node_implementation::{NodeType, Versions}, }, - utils::{View, ViewInner}, PeerConfig, ValidatorConfig, }; -use std::time::Duration; +use std::fmt::Debug; use tracing::{Instrument, Level}; use url::Url; use crate::{ external_event_handler::{self, ExternalEventHandler}, + proposal_fetcher::ProposalFetcherConfig, state_signature::StateSigner, static_stake_table_commitment, Node, SeqTypes, SequencerApiVersion, }; @@ -55,37 +46,6 @@ use crate::{ /// The consensus handle pub type Consensus = SystemContextHandle, V>; -#[derive(Clone, Copy, Debug, Parser)] -pub struct ProposalFetcherConfig { - #[clap( - long = "proposal-fetcher-num-workers", - env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_NUM_WORKERS", - default_value = "2" - )] - pub num_workers: usize, - - #[clap( - long = "proposal-fetcher-channel-capacity", - env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_CHANNEL_CAPACITY", - default_value = "100" - )] - pub channel_capacity: usize, - - 
#[clap( - long = "proposal-fetcher-fetch-timeout", - env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_FETCH_TIMEOUT", - default_value = "2s", - value_parser = parse_duration, - )] - pub fetch_timeout: Duration, -} - -impl Default for ProposalFetcherConfig { - fn default() -> Self { - Self::parse_from(std::iter::empty::()) - } -} - /// The sequencer context contains a consensus handle and other sequencer specific information. #[derive(Derivative, Clone)] #[derivative(Debug(bound = ""))] @@ -210,6 +170,7 @@ impl, P: SequencerPersistence, V: Versions> Sequence event_consumer, anchor_view, proposal_fetcher_cfg, + metrics, ) .with_task_list(tasks)) } @@ -228,6 +189,7 @@ impl, P: SequencerPersistence, V: Versions> Sequence event_consumer: impl PersistenceEventConsumer + 'static, anchor_view: Option, proposal_fetcher_cfg: ProposalFetcherConfig, + metrics: &dyn Metrics, ) -> Self { let events = handle.event_stream(); @@ -245,19 +207,12 @@ impl, P: SequencerPersistence, V: Versions> Sequence }; // Spawn proposal fetching tasks. - let (send, recv) = broadcast(proposal_fetcher_cfg.channel_capacity); - ctx.spawn("proposal scanner", scan_proposals(ctx.handle.clone(), send)); - for i in 0..proposal_fetcher_cfg.num_workers { - ctx.spawn( - format!("proposal fetcher {i}"), - fetch_proposals( - ctx.handle.clone(), - persistence.clone(), - recv.clone(), - proposal_fetcher_cfg.fetch_timeout, - ), - ); - } + proposal_fetcher_cfg.spawn( + &mut ctx.tasks, + ctx.handle.clone(), + persistence.clone(), + metrics, + ); // Spawn event handling loop. ctx.spawn( @@ -357,7 +312,7 @@ impl, P: SequencerPersistence, V: Versions> Sequence /// /// When this context is dropped or [`shut_down`](Self::shut_down), background tasks will be /// cancelled in the reverse order that they were spawned. - pub fn spawn(&mut self, name: impl Display, task: impl Future + Send + 'static) { + pub fn spawn(&mut self, name: impl Display, task: impl Future + Send + 'static) { self.tasks.spawn(name, task); } @@ -368,7 +323,11 @@ impl, P: SequencerPersistence, V: Versions> Sequence /// /// The only difference between a short-lived background task and a [long-lived](Self::spawn) /// one is how urgently logging related to the task is treated. - pub fn spawn_short_lived(&mut self, name: impl Display, task: impl Future + Send + 'static) { + pub fn spawn_short_lived( + &mut self, + name: impl Display, + task: impl Future + Send + 'static, + ) { self.tasks.spawn_short_lived(name, task); } @@ -475,127 +434,6 @@ async fn handle_events( } } -#[tracing::instrument(skip_all)] -async fn scan_proposals( - consensus: Arc>>, - fetcher: Sender<(ViewNumber, Commitment>)>, -) where - N: ConnectedNetwork, - P: SequencerPersistence, - V: Versions, -{ - let mut events = consensus.read().await.event_stream(); - while let Some(event) = events.next().await { - let EventType::QuorumProposal { proposal, .. } = event.event else { - continue; - }; - // Whenever we see a quorum proposal, ensure we have the chain of proposals stretching back - // to the anchor. This allows state replay from the decided state. 
- let parent_view = proposal.data.justify_qc.view_number; - let parent_leaf = proposal.data.justify_qc.data.leaf_commit; - fetcher - .broadcast_direct((parent_view, parent_leaf)) - .await - .ok(); - } -} - -#[tracing::instrument(skip_all)] -async fn fetch_proposals( - consensus: Arc>>, - persistence: Arc, - mut scanner: Receiver<(ViewNumber, Commitment>)>, - fetch_timeout: Duration, -) where - N: ConnectedNetwork, - P: SequencerPersistence, - V: Versions, -{ - let sender = scanner.new_sender(); - while let Some((view, leaf)) = scanner.next().await { - let span = tracing::warn_span!("fetch proposal", ?view, %leaf); - let res: anyhow::Result<()> = async { - let anchor_view = load_anchor_view(&*persistence).await; - if view <= anchor_view { - tracing::debug!(?anchor_view, "skipping already-decided proposal"); - return Ok(()); - } - - match persistence.load_quorum_proposal(view).await { - Ok(proposal) => { - // If we already have the proposal in storage, keep traversing the chain to its - // parent. - let view = proposal.data.justify_qc.view_number; - let leaf = proposal.data.justify_qc.data.leaf_commit; - sender.broadcast_direct((view, leaf)).await.ok(); - return Ok(()); - } - Err(err) => { - tracing::info!("proposal missing from storage; fetching from network: {err:#}"); - } - } - - let future = - consensus - .read() - .await - .request_proposal(view, EpochNumber::genesis(), leaf)?; - let proposal = timeout(fetch_timeout, future) - .await - .context("timed out fetching proposal")? - .context("error fetching proposal")?; - persistence - .append_quorum_proposal(&proposal) - .await - .context("error saving fetched proposal")?; - - // Add the fetched leaf to HotShot state, so consensus can make use of it. - let leaf = Leaf2::from_quorum_proposal(&proposal.data); - let handle = consensus.read().await; - let consensus = handle.consensus(); - let mut consensus = consensus.write().await; - if matches!( - consensus.validated_state_map().get(&view), - None | Some(View { - // Replace a Da-only view with a Leaf view, which has strictly more information. - view_inner: ViewInner::Da { .. } - }) - ) { - let state = Arc::new(ValidatedState::from_header(leaf.block_header())); - if let Err(err) = consensus.update_leaf(leaf, state, None) { - tracing::warn!("unable to update leaf: {err:#}"); - } - } - - Ok(()) - } - .instrument(span) - .await; - if let Err(err) = res { - tracing::warn!("failed to fetch proposal: {err:#}"); - - // Avoid busy loop when operations are failing. - sleep(Duration::from_secs(1)).await; - - // If we fail fetching the proposal, don't let it clog up the fetching task. Just push - // it back onto the queue and move onto the next proposal. - sender.broadcast_direct((view, leaf)).await.ok(); - } - } -} - -async fn load_anchor_view(persistence: &impl SequencerPersistence) -> ViewNumber { - loop { - match persistence.load_anchor_view().await { - Ok(view) => break view, - Err(err) => { - tracing::warn!("error loading anchor view: {err:#}"); - sleep(Duration::from_secs(1)).await; - } - } - } -} - #[derive(Debug, Default, Clone)] #[allow(clippy::type_complexity)] pub(crate) struct TaskList(Arc)>>>); @@ -609,8 +447,8 @@ macro_rules! 
spawn_with_log_level { spawn( async move { tracing::event!($lvl, "spawning background task"); - $task.await; - tracing::event!($lvl, "background task exited"); + let res = $task.await; + tracing::event!($lvl, ?res, "background task exited"); } .instrument(span), ) @@ -624,7 +462,7 @@ impl TaskList { /// /// When this [`TaskList`] is dropped or [`shut_down`](Self::shut_down), background tasks will /// be cancelled in the reverse order that they were spawned. - pub fn spawn(&mut self, name: impl Display, task: impl Future + Send + 'static) { + pub fn spawn(&mut self, name: impl Display, task: impl Future + Send + 'static) { spawn_with_log_level!(self, Level::INFO, name, task); } @@ -635,7 +473,11 @@ impl TaskList { /// /// The only difference between a short-lived background task and a [long-lived](Self::spawn) /// one is how urgently logging related to the task is treated. - pub fn spawn_short_lived(&mut self, name: impl Display, task: impl Future + Send + 'static) { + pub fn spawn_short_lived( + &mut self, + name: impl Display, + task: impl Future + Send + 'static, + ) { spawn_with_log_level!(self, Level::DEBUG, name, task); } diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 9c9f6c4d60..c25a6eac21 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -594,7 +594,7 @@ mod test { let genesis: Genesis = toml::from_str(&toml).unwrap_or_else(|err| panic!("{err:#}")); - // validate the the fee_contract address + // validate the fee_contract address let result = genesis.validate_fee_contract(anvil.endpoint()).await; // check if the result from the validation is an error diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index bba6356ad3..acc973152a 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -2,6 +2,7 @@ pub mod api; pub mod catchup; pub mod context; pub mod genesis; +mod proposal_fetcher; mod external_event_handler; pub mod options; @@ -13,16 +14,17 @@ mod message_compat_tests; use anyhow::Context; use catchup::StatePeers; -use context::{ProposalFetcherConfig, SequencerContext}; +use context::SequencerContext; use espresso_types::{ traits::EventConsumer, BackoffParams, L1ClientOptions, NodeState, PubKey, SeqTypes, SolverAuctionResultsProvider, ValidatedState, }; -use futures::FutureExt; use genesis::L1Finalized; use hotshot::traits::election::static_committee::StaticCommittee; use hotshot_types::traits::election::Membership; +use proposal_fetcher::ProposalFetcherConfig; use std::sync::Arc; +use tokio::select; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support use libp2p::Multiaddr; use network::libp2p::split_off_peer_id; @@ -53,7 +55,7 @@ use hotshot_types::{ light_client::{StateKeyPair, StateSignKey}, signature_key::{BLSPrivKey, BLSPubKey}, traits::{ - metrics::Metrics, + metrics::{Metrics, NoMetrics}, network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType, Versions}, }, @@ -312,8 +314,11 @@ pub async fn init_node( // If we were told to fetch the config from an already-started peer, do so. 
(None, Some(peers)) => { tracing::info!(?peers, "loading network config from peers"); - let peers = - StatePeers::::from_urls(peers, network_params.catchup_backoff); + let peers = StatePeers::::from_urls( + peers, + network_params.catchup_backoff, + &NoMetrics, + ); let config = peers.fetch_config(validator_config.clone()).await?; tracing::info!( @@ -457,11 +462,11 @@ pub async fn init_node( })?; tracing::warn!("Waiting for at least one connection to be initialized"); - futures::select! { - _ = cdn_network.wait_for_ready().fuse() => { + select! { + _ = cdn_network.wait_for_ready() => { tracing::warn!("CDN connection initialized"); }, - _ = p2p_network.wait_for_ready().fuse() => { + _ = p2p_network.wait_for_ready() => { tracing::warn!("P2P connection initialized"); }, }; @@ -509,6 +514,7 @@ pub async fn init_node( StatePeers::::from_urls( network_params.state_peers, network_params.catchup_backoff, + metrics, ), ) .await, diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 9882283d3f..b7fabe1d78 100644 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -21,7 +21,7 @@ use hotshot_types::{light_client::StateSignKey, signature_key::BLSPrivKey}; use libp2p::Multiaddr; use url::Url; -use crate::{api, context::ProposalFetcherConfig, persistence}; +use crate::{api, persistence, proposal_fetcher::ProposalFetcherConfig}; // This options struct is a bit unconventional. The sequencer has multiple optional modules which // can be added, in any combination, to the service. These include, for example, the API server. diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 5c0d201a59..c705920191 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -23,16 +23,20 @@ pub trait ChainConfigPersistence: Sized + Send + Sync { #[cfg(any(test, feature = "testing"))] mod testing { - use espresso_types::v0::traits::SequencerPersistence; + use espresso_types::v0::traits::{PersistenceOptions, SequencerPersistence}; use super::*; #[allow(dead_code)] #[async_trait] pub trait TestablePersistence: SequencerPersistence { - type Storage; + type Storage: Sync; async fn tmp_storage() -> Self::Storage; - async fn connect(storage: &Self::Storage) -> Self; + fn options(storage: &Self::Storage) -> impl PersistenceOptions; + + async fn connect(storage: &Self::Storage) -> Self { + Self::options(storage).create().await.unwrap() + } } } @@ -45,7 +49,8 @@ mod persistence_tests { use async_lock::RwLock; use committable::Committable; use espresso_types::{ - traits::EventConsumer, Event, Leaf, Leaf2, NodeState, PubKey, SeqTypes, ValidatedState, + traits::{EventConsumer, NullEventConsumer, PersistenceOptions}, + Event, Leaf, Leaf2, NodeState, PubKey, SeqTypes, ValidatedState, }; use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_example_types::node_types::TestVersions; @@ -747,4 +752,132 @@ mod persistence_tests { assert!(info.leaf.block_payload().is_some()); } } + + #[tokio::test(flavor = "multi_thread")] + pub async fn test_pruning() { + setup_test(); + + let tmp = P::tmp_storage().await; + + let mut options = P::options(&tmp); + options.set_view_retention(1); + let storage = options.create().await.unwrap(); + + // Add some "old" data, from view 0. 
+ let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf_payload = leaf.block_payload().unwrap(); + let leaf_payload_bytes_arc = leaf_payload.encode(); + let disperse = vid_scheme(2) + .disperse(leaf_payload_bytes_arc.clone()) + .unwrap(); + let payload_commitment = disperse.commit; + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); + let vid_share = VidDisperseShare:: { + view_number: ViewNumber::new(0), + payload_commitment, + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + } + .to_proposal(&privkey) + .unwrap() + .clone(); + + let quorum_proposal = QuorumProposal2:: { + block_header: leaf.block_header().clone(), + view_number: ViewNumber::genesis(), + justify_qc: QuorumCertificate::genesis::( + &ValidatedState::default(), + &NodeState::mock(), + ) + .await + .to_qc2(), + upgrade_certificate: None, + view_change_evidence: None, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }; + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + let quorum_proposal = Proposal { + data: quorum_proposal, + signature: quorum_proposal_signature, + _pd: Default::default(), + }; + + let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) + .expect("Failed to sign block payload"); + let da_proposal = Proposal { + data: DaProposal:: { + encoded_transactions: leaf_payload_bytes_arc, + metadata: leaf_payload.ns_table().clone(), + view_number: ViewNumber::new(0), + }, + signature: block_payload_signature, + _pd: Default::default(), + }; + + storage + .append_da(&da_proposal, payload_commitment) + .await + .unwrap(); + storage.append_vid(&vid_share).await.unwrap(); + storage + .append_quorum_proposal(&quorum_proposal) + .await + .unwrap(); + + // Decide a newer view, view 1. + storage + .append_decided_leaves(ViewNumber::new(1), [], &NullEventConsumer) + .await + .unwrap(); + + // The old data is not more than the retention period (1 view) old, so it should not be + // GCed. + assert_eq!( + storage + .load_da_proposal(ViewNumber::new(0)) + .await + .unwrap() + .unwrap(), + da_proposal + ); + assert_eq!( + storage + .load_vid_share(ViewNumber::new(0)) + .await + .unwrap() + .unwrap(), + vid_share + ); + assert_eq!( + storage + .load_quorum_proposal(ViewNumber::new(0)) + .await + .unwrap(), + quorum_proposal + ); + + // Decide an even newer view, triggering GC of the old data. 
+ storage + .append_decided_leaves(ViewNumber::new(2), [], &NullEventConsumer) + .await + .unwrap(); + assert!(storage + .load_da_proposal(ViewNumber::new(0)) + .await + .unwrap() + .is_none()); + assert!(storage + .load_vid_share(ViewNumber::new(0)) + .await + .unwrap() + .is_none()); + assert!(storage + .load_quorum_proposal(ViewNumber::new(0)) + .await + .is_err()); + } } diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index e482090e73..43e5541aa6 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -12,7 +12,10 @@ use hotshot_types::{ event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, - traits::{block_contents::BlockPayload, node_implementation::ConsensusTime}, + traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::ConsensusTime, + }, utils::View, vid::VidSchemeType, vote::HasViewNumber, @@ -23,6 +26,7 @@ use std::{ collections::BTreeMap, fs::{self, File, OpenOptions}, io::{Read, Seek, SeekFrom, Write}, + ops::RangeInclusive, path::{Path, PathBuf}, }; @@ -39,6 +43,24 @@ pub struct Options { #[clap(long, env = "ESPRESSO_SEQUENCER_STORE_UNDECIDED_STATE", hide = true)] store_undecided_state: bool, + + /// Number of views to retain in consensus storage before data that hasn't been archived is + /// garbage collected. + /// + /// The longer this is, the more certain that all data will eventually be archived, even if + /// there are temporary problems with archive storage or partially missing data. This can be set + /// very large, as most data is garbage collected as soon as it is finalized by consensus. This + /// setting only applies to views which never get decided (ie forks in consensus) and views for + /// which this node is partially offline. These should be exceptionally rare. + /// + /// The default of 130000 views equates to approximately 3 days (259200 seconds) at an average + /// view time of 2s. + #[clap( + long, + env = "ESPRESSO_SEQUENCER_CONSENSUS_VIEW_RETENTION", + default_value = "130000" + )] + pub(crate) consensus_view_retention: u64, } impl Default for Options { @@ -52,6 +74,7 @@ impl Options { Self { path, store_undecided_state: false, + consensus_view_retention: 130000, } } @@ -64,13 +87,21 @@ impl Options { impl PersistenceOptions for Options { type Persistence = Persistence; + fn set_view_retention(&mut self, view_retention: u64) { + self.consensus_view_retention = view_retention; + } + async fn create(&mut self) -> anyhow::Result { let path = self.path.clone(); let store_undecided_state = self.store_undecided_state; + let view_retention = self.consensus_view_retention; Ok(Persistence { store_undecided_state, - inner: Arc::new(RwLock::new(Inner { path })), + inner: Arc::new(RwLock::new(Inner { + path, + view_retention, + })), }) } @@ -93,6 +124,7 @@ pub struct Persistence { #[derive(Debug)] struct Inner { path: PathBuf, + view_retention: u64, } impl Inner { @@ -178,47 +210,64 @@ impl Inner { Ok(()) } - fn collect_garbage(&mut self, view: ViewNumber) -> anyhow::Result<()> { + fn collect_garbage( + &mut self, + view: ViewNumber, + intervals: &[RangeInclusive], + ) -> anyhow::Result<()> { let view_number = view.u64(); + let prune_view = view.saturating_sub(self.view_retention); - let delete_files = |view_number: u64, dir_path: PathBuf| -> anyhow::Result<()> { - if !dir_path.is_dir() { - return Ok(()); - } - - for entry in fs::read_dir(dir_path)? 
{ - let entry = entry?; - let path = entry.path(); + let delete_files = + |intervals: &[RangeInclusive], keep, dir_path: PathBuf| -> anyhow::Result<()> { + if !dir_path.is_dir() { + return Ok(()); + } - if let Some(file) = path.file_stem().and_then(|n| n.to_str()) { - if let Ok(v) = file.parse::() { - if v <= view_number { - fs::remove_file(&path)?; + for entry in fs::read_dir(dir_path)? { + let entry = entry?; + let path = entry.path(); + + if let Some(file) = path.file_stem().and_then(|n| n.to_str()) { + if let Ok(v) = file.parse::() { + // If the view is the anchor view, keep it no matter what. + if let Some(keep) = keep { + if keep == v { + continue; + } + } + // Otherwise, delete it if it is time to prune this view _or_ if the + // given intervals, which we've already successfully processed, contain + // the view; in this case we simply don't need it anymore. + if v < prune_view || intervals.iter().any(|i| i.contains(&v)) { + fs::remove_file(&path)?; + } } } } - } - Ok(()) - }; + Ok(()) + }; - delete_files(view_number, self.da_dir_path())?; - delete_files(view_number, self.vid_dir_path())?; - delete_files(view_number, self.quorum_proposals_dir_path())?; + delete_files(intervals, None, self.da_dir_path())?; + delete_files(intervals, None, self.vid_dir_path())?; + delete_files(intervals, None, self.quorum_proposals_dir_path())?; // Save the most recent leaf as it will be our anchor point if the node restarts. - if view_number > 0 { - delete_files(view_number - 1, self.decided_leaf_path())?; - } + delete_files(intervals, Some(view_number), self.decided_leaf_path())?; Ok(()) } + /// Generate events based on persisted decided leaves. + /// + /// Returns a list of closed intervals of views which can be safely deleted, as all leaves + /// within these view ranges have been processed by the event consumer. async fn generate_decide_events( &self, view: ViewNumber, consumer: &impl EventConsumer, - ) -> anyhow::Result<()> { + ) -> anyhow::Result>> { // Generate a decide event for each leaf, to be processed by the event consumer. We make a // separate event for each leaf because it is possible we have non-consecutive leaves in our // storage, which would not be valid as a single decide with a single leaf chain. @@ -286,7 +335,10 @@ impl Inner { } } + let mut intervals = vec![]; + let mut current_interval = None; for (view, (leaf, qc)) in leaves { + let height = leaf.leaf.block_header().block_number(); consumer .handle_event(&Event { view_number: ViewNumber::new(view), @@ -297,9 +349,27 @@ impl Inner { }, }) .await?; + if let Some((start, end, current_height)) = current_interval.as_mut() { + if height == *current_height + 1 { + // If we have a chain of consecutive leaves, extend the current interval of + // views which are safe to delete. + *current_height += 1; + *end = view; + } else { + // Otherwise, end the current interval and start a new one. + intervals.push(*start..=*end); + current_interval = Some((view, view, height)); + } + } else { + // Start a new interval. + current_interval = Some((view, view, height)); + } + } + if let Some((start, end, _)) = current_interval { + intervals.push(start..=end); } - Ok(()) + Ok(intervals) } fn load_da_proposal( @@ -477,20 +547,21 @@ impl SequencerPersistence for Persistence { )?; } - // Event processing failure is not an error, since by this point we have at least managed to - // persist the decided leaves successfully, and the event processing will just run again at - // the next decide. 
If there is an error here, we just log it and return early with success - // to prevent GC from running before the decided leaves are processed. - if let Err(err) = inner.generate_decide_events(view, consumer).await { - tracing::warn!(?view, "event processing failed: {err:#}"); - return Ok(()); - } - - if let Err(err) = inner.collect_garbage(view) { - // Similarly, garbage collection is not an error. We have done everything we strictly - // needed to do, and GC will run again at the next decide. Log the error but do not - // return it. - tracing::warn!(?view, "GC failed: {err:#}"); + match inner.generate_decide_events(view, consumer).await { + Err(err) => { + // Event processing failure is not an error, since by this point we have at least + // managed to persist the decided leaves successfully, and the event processing will + // just run again at the next decide. + tracing::warn!(?view, "event processing failed: {err:#}"); + } + Ok(intervals) => { + if let Err(err) = inner.collect_garbage(view, &intervals) { + // Similarly, garbage collection is not an error. We have done everything we + // strictly needed to do, and GC will run again at the next decide. Log the + // error but do not return it. + tracing::warn!(?view, "GC failed: {err:#}"); + } + } } Ok(()) @@ -856,8 +927,8 @@ mod testing { TempDir::new().unwrap() } - async fn connect(storage: &Self::Storage) -> Self { - Options::new(storage.path().into()).create().await.unwrap() + fn options(storage: &Self::Storage) -> impl PersistenceOptions { + Options::new(storage.path().into()) } } } diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index f7b34aeb1a..e80e89205d 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -29,6 +29,8 @@ pub struct Options; impl PersistenceOptions for Options { type Persistence = NoStorage; + fn set_view_retention(&mut self, _: u64) {} + async fn create(&mut self) -> anyhow::Result { Ok(NoStorage) } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 4cb250d9b4..61c6243556 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -10,13 +10,22 @@ use espresso_types::{ BackoffParams, Leaf, Leaf2, NetworkConfig, Payload, }; use futures::stream::StreamExt; -use hotshot_query_service::data_source::storage::sql::{syntax_helpers::MAX_FN, Db}; -use hotshot_query_service::data_source::{ - storage::{ - pruning::PrunerCfg, - sql::{include_migrations, query_as, Config, SqlStorage}, +use hotshot_query_service::{ + availability::LeafQueryData, + data_source::{ + storage::{ + pruning::PrunerCfg, + sql::{ + include_migrations, query_as, syntax_helpers::MAX_FN, Config, Db, SqlStorage, + Transaction, TransactionMode, Write, + }, + }, + Transaction as _, VersionedDataSource, + }, + fetching::{ + request::{LeafRequest, PayloadRequest, VidCommonRequest}, + Provider, }, - Transaction as _, VersionedDataSource, }; use hotshot_types::{ consensus::CommitmentMap, @@ -24,12 +33,15 @@ use hotshot_types::{ event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, - traits::{node_implementation::ConsensusTime, BlockPayload}, + traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::ConsensusTime, + }, utils::View, - vid::VidSchemeType, + vid::{VidCommitment, VidCommon}, vote::HasViewNumber, }; -use jf_vid::VidScheme; +use itertools::Itertools; use 
sqlx::Row; use sqlx::{query, Executor}; use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; @@ -37,7 +49,7 @@ use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::D use crate::{catchup::SqlStateCatchup, SeqTypes, ViewNumber}; /// Options for Postgres-backed persistence. -#[derive(Parser, Clone, Derivative, Default)] +#[derive(Parser, Clone, Derivative)] #[derivative(Debug)] pub struct PostgresOptions { /// Hostname for the remote Postgres database server. @@ -67,6 +79,12 @@ pub struct PostgresOptions { pub(crate) use_tls: bool, } +impl Default for PostgresOptions { + fn default() -> Self { + Self::parse_from(std::iter::empty::()) + } +} + #[derive(Parser, Clone, Derivative, Default, From, Into)] #[derivative(Debug)] pub struct SqliteOptions { @@ -77,7 +95,7 @@ pub struct SqliteOptions { env = "ESPRESSO_SEQUENCER_STORAGE_PATH", value_parser = build_sqlite_path )] - pub(crate) path: PathBuf, + pub(crate) path: Option, } pub fn build_sqlite_path(path: &str) -> anyhow::Result { @@ -93,7 +111,7 @@ pub fn build_sqlite_path(path: &str) -> anyhow::Result { } /// Options for database-backed persistence, supporting both Postgres and SQLite. -#[derive(Parser, Clone, Derivative, Default, From, Into)] +#[derive(Parser, Clone, Derivative, From, Into)] #[derivative(Debug)] pub struct Options { #[cfg(not(feature = "embedded-db"))] @@ -134,6 +152,10 @@ pub struct Options { #[clap(flatten)] pub(crate) pruning: PruningOptions, + /// Pruning parameters for ephemeral consensus storage. + #[clap(flatten)] + pub(crate) consensus_pruning: ConsensusPruningOptions, + #[clap(long, env = "ESPRESSO_SEQUENCER_STORE_UNDECIDED_STATE", hide = true)] pub(crate) store_undecided_state: bool, @@ -211,6 +233,12 @@ pub struct Options { pub(crate) pool: Option>, } +impl Default for Options { + fn default() -> Self { + Self::parse_from(std::iter::empty::()) + } +} + #[cfg(not(feature = "embedded-db"))] impl From for Config { fn from(opt: PostgresOptions) -> Self { @@ -254,7 +282,10 @@ impl From for Config { fn from(opt: SqliteOptions) -> Self { let mut cfg = Config::default(); - cfg = cfg.db_path(opt.path); + if let Some(path) = opt.path { + cfg = cfg.db_path(path); + } + cfg = cfg.max_connections(20); cfg = cfg.idle_connection_timeout(Duration::from_secs(120)); cfg = cfg.connection_timeout(Duration::from_secs(10240)); @@ -290,17 +321,17 @@ impl From for Options { } } } -impl TryFrom for Config { +impl TryFrom<&Options> for Config { type Error = anyhow::Error; - fn try_from(opt: Options) -> Result { - let mut cfg = match opt.uri { + fn try_from(opt: &Options) -> Result { + let mut cfg = match &opt.uri { Some(uri) => uri.parse()?, None => Self::default(), }; - if let Some(pool) = opt.pool { - cfg = cfg.pool(pool); + if let Some(pool) = &opt.pool { + cfg = cfg.pool(pool.clone()); } cfg = cfg.max_connections(opt.max_connections); @@ -315,10 +346,10 @@ impl TryFrom for Config { "$CARGO_MANIFEST_DIR/api/migrations/postgres" )); - let pg_options = opt.postgres_options; + let pg_options = &opt.postgres_options; - if let Some(host) = pg_options.host { - cfg = cfg.host(host); + if let Some(host) = &pg_options.host { + cfg = cfg.host(host.clone()); } if let Some(port) = pg_options.port { @@ -348,7 +379,9 @@ impl TryFrom for Config { "$CARGO_MANIFEST_DIR/api/migrations/sqlite" )); - cfg = cfg.db_path(opt.sqlite_options.path); + if let Some(path) = &opt.sqlite_options.path { + cfg = cfg.db_path(path.clone()); + } } if opt.prune { @@ -363,7 +396,7 @@ impl TryFrom for Config { } 
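A note on the Default impls introduced above: deriving Default would ignore the clap default_value attributes, so the options types build their defaults by parsing an empty argument list instead. Below is a minimal sketch of that pattern; the struct and field names are illustrative only and not taken from this change.

use clap::Parser;

#[derive(Parser, Clone, Debug)]
struct ExampleOptions {
    /// Example field; the clap default below is what Default::default() should yield.
    #[clap(long, default_value = "302000")]
    target_retention: u64,
}

impl Default for ExampleOptions {
    fn default() -> Self {
        // Parsing an empty argument list applies every default_value, keeping
        // Default::default() consistent with the CLI defaults.
        Self::parse_from(std::iter::empty::<String>())
    }
}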
/// Pruning parameters. -#[derive(Parser, Clone, Debug, Default)] +#[derive(Parser, Clone, Copy, Debug)] pub struct PruningOptions { /// Threshold for pruning, specified in bytes. /// If the disk usage surpasses this threshold, pruning is initiated for data older than the specified minimum retention period. @@ -437,16 +470,79 @@ impl From for PrunerCfg { } } +/// Pruning parameters for ephemeral consensus storage. +#[derive(Parser, Clone, Copy, Debug)] +pub struct ConsensusPruningOptions { + /// Number of views to try to retain in consensus storage before data that hasn't been archived + /// is garbage collected. + /// + /// The longer this is, the more certain that all data will eventually be archived, even if + /// there are temporary problems with archive storage or partially missing data. This can be set + /// very large, as most data is garbage collected as soon as it is finalized by consensus. This + /// setting only applies to views which never get decided (ie forks in consensus) and views for + /// which this node is partially offline. These should be exceptionally rare. + /// + /// Note that in extreme scenarios, data may be garbage collected even before TARGET_RETENTION + /// views, if consensus storage exceeds TARGET_USAGE. For a hard lower bound on how long + /// consensus data will be retained, see MINIMUM_RETENTION. + /// + /// The default of 302000 views equates to approximately 1 week (604800 seconds) at an average + /// view time of 2s. + #[clap( + name = "TARGET_RETENTION", + long = "consensus-storage-target-retention", + env = "ESPRESSO_SEQUENCER_CONSENSUS_STORAGE_TARGET_RETENTION", + default_value = "302000" + )] + target_retention: u64, + + /// Minimum number of views to try to retain in consensus storage before data that hasn't been + /// archived is garbage collected. + /// + /// This bound allows data to be retained even if consensus storage occupies more than + /// TARGET_USAGE. This can be used to ensure sufficient time to move consensus data to archival + /// storage as necessary, even under extreme circumstances where otherwise garbage collection + /// would kick in based on TARGET_RETENTION. + /// + /// The default of 130000 views equates to approximately 3 days (259200 seconds) at an average + /// view time of 2s. + #[clap( + name = "MINIMUM_RETENTION", + long = "consensus-storage-minimum-retention", + env = "ESPRESSO_SEQUENCER_CONSENSUS_STORAGE_MINIMUM_RETENTION", + default_value = "130000" + )] + minimum_retention: u64, + + /// Amount (in bytes) of data to retain in consensus storage before garbage collecting more + /// aggressively. + /// + /// See also TARGET_RETENTION and MINIMUM_RETENTION. 
+ #[clap( + name = "TARGET_USAGE", + long = "consensus-storage-target-usage", + env = "ESPRESSO_SEQUENCER_CONSENSUS_STORAGE_TARGET_USAGE", + default_value = "1000000000" + )] + target_usage: u64, +} + #[async_trait] impl PersistenceOptions for Options { type Persistence = Persistence; + fn set_view_retention(&mut self, view_retention: u64) { + self.consensus_pruning.target_retention = view_retention; + self.consensus_pruning.minimum_retention = view_retention; + } + async fn create(&mut self) -> anyhow::Result { let store_undecided_state = self.store_undecided_state; - let config = self.clone().try_into()?; + let config = (&*self).try_into()?; let persistence = Persistence { store_undecided_state, db: SqlStorage::connect(config).await?, + gc_opt: self.consensus_pruning, }; persistence.migrate_quorum_proposal_leaf_hashes().await?; self.pool = Some(persistence.db.pool()); @@ -454,16 +550,17 @@ impl PersistenceOptions for Options { } async fn reset(self) -> anyhow::Result<()> { - SqlStorage::connect(Config::try_from(self)?.reset_schema()).await?; + SqlStorage::connect(Config::try_from(&self)?.reset_schema()).await?; Ok(()) } } /// Postgres-backed persistence. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Persistence { db: SqlStorage, store_undecided_state: bool, + gc_opt: ConsensusPruningOptions, } impl Persistence { @@ -501,6 +598,313 @@ impl Persistence { tx.commit().await } + + async fn generate_decide_events(&self, consumer: &impl EventConsumer) -> anyhow::Result<()> { + let mut last_processed_view: Option = self + .db + .read() + .await? + .fetch_optional("SELECT last_processed_view FROM event_stream WHERE id = 1 LIMIT 1") + .await? + .map(|row| row.get("last_processed_view")); + loop { + // In SQLite, overlapping read and write transactions can lead to database errors. To + // avoid this: + // - start a read transaction to query and collect all the necessary data. + // - Commit (or implicitly drop) the read transaction once the data is fetched. + // - use the collected data to generate a "decide" event for the consumer. + // - begin a write transaction to delete the data and update the event stream. + let mut tx = self.db.read().await?; + + // Collect a chain of consecutive leaves, starting from the first view after the last + // decide. This will correspond to a decide event, and defines a range of views which + // can be garbage collected. This may even include views for which there was no leaf, + // for which we might still have artifacts like proposals that never finalized. + let from_view = match last_processed_view { + Some(v) => v + 1, + None => 0, + }; + + let mut parent = None; + let mut rows = query("SELECT leaf, qc FROM anchor_leaf WHERE view >= $1 ORDER BY view") + .bind(from_view) + .fetch(tx.as_mut()); + let mut leaves = vec![]; + let mut final_qc = None; + while let Some(row) = rows.next().await { + let row = match row { + Ok(row) => row, + Err(err) => { + // If there's an error getting a row, try generating an event with the rows + // we do have. + tracing::warn!("error loading row: {err:#}"); + break; + } + }; + + let leaf_data: Vec = row.get("leaf"); + let leaf = bincode::deserialize::(&leaf_data)?; + let qc_data: Vec = row.get("qc"); + let qc = bincode::deserialize::>(&qc_data)?; + let height = leaf.block_header().block_number(); + + // Ensure we are only dealing with a consecutive chain of leaves. 
We don't want to + // garbage collect any views for which we missed a leaf or decide event; at least + // not right away, in case we need to recover that data later. + if let Some(parent) = parent { + if height != parent + 1 { + tracing::debug!( + height, + parent, + "ending decide event at non-consecutive leaf" + ); + break; + } + } + parent = Some(height); + leaves.push(leaf); + final_qc = Some(qc); + } + drop(rows); + + let Some(final_qc) = final_qc else { + // End event processing when there are no more decided views. + tracing::debug!(from_view, "no new leaves at decide"); + return Ok(()); + }; + + // Find the range of views encompassed by this leaf chain. All data in this range can be + // processed by the consumer and then deleted. + let from_view = leaves[0].view_number(); + let to_view = leaves[leaves.len() - 1].view_number(); + + // Collect VID shares for the decide event. + let mut vid_shares = tx + .fetch_all( + query("SELECT view, data FROM vid_share where view >= $1 AND view <= $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await? + .into_iter() + .map(|row| { + let view: i64 = row.get("view"); + let data: Vec = row.get("data"); + let vid_proposal = bincode::deserialize::< + Proposal>, + >(&data)?; + Ok((view as u64, vid_proposal.data)) + }) + .collect::>>()?; + + // Collect DA proposals for the decide event. + let mut da_proposals = tx + .fetch_all( + query("SELECT view, data FROM da_proposal where view >= $1 AND view <= $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await? + .into_iter() + .map(|row| { + let view: i64 = row.get("view"); + let data: Vec = row.get("data"); + let da_proposal = + bincode::deserialize::>>(&data)?; + Ok((view as u64, da_proposal.data)) + }) + .collect::>>()?; + + drop(tx); + + // Collate all the information by view number and construct a chain of leaves. + let leaf_chain = leaves + .into_iter() + // Go in reverse chronological order, as expected by Decide events. + .rev() + .map(|mut leaf| { + let view = leaf.view_number(); + + // Include the VID share if available. + let vid_share = vid_shares.remove(&view); + if vid_share.is_none() { + tracing::debug!(?view, "VID share not available at decide"); + } + + // Fill in the full block payload using the DA proposals we had persisted. + if let Some(proposal) = da_proposals.remove(&view) { + let payload = + Payload::from_bytes(&proposal.encoded_transactions, &proposal.metadata); + leaf.fill_block_payload_unchecked(payload); + } else if view == ViewNumber::genesis() { + // We don't get a DA proposal for the genesis view, but we know what the + // payload always is. + leaf.fill_block_payload_unchecked(Payload::empty().0); + } else { + tracing::debug!(?view, "DA proposal not available at decide"); + } + + LeafInfo { + leaf: leaf.into(), + vid_share, + // Note: the following fields are not used in Decide event processing, and + // should be removed. For now, we just default them. + state: Default::default(), + delta: Default::default(), + } + }) + .collect(); + + // Generate decide event for the consumer. 
+ tracing::debug!(?to_view, ?final_qc, ?leaf_chain, "generating decide event"); + consumer + .handle_event(&Event { + view_number: to_view, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_chain), + qc: Arc::new(final_qc.to_qc2()), + block_size: None, + }, + }) + .await?; + + let mut tx = self.db.write().await?; + + // Now that we have definitely processed leaves up to `to_view`, we can update + // `last_processed_view` so we don't process these leaves again. We may still fail at + // this point, or shut down, and fail to complete this update. At worst this will lead + // to us sending a duplicate decide event the next time we are called; this is fine as + // the event consumer is required to be idempotent. + tx.upsert( + "event_stream", + ["id", "last_processed_view"], + ["id"], + [(1i32, to_view.u64() as i64)], + ) + .await?; + + // Delete the data that has been fully processed. + tx.execute( + query("DELETE FROM vid_share where view >= $1 AND view <= $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await?; + tx.execute( + query("DELETE FROM da_proposal where view >= $1 AND view <= $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await?; + tx.execute( + query("DELETE FROM quorum_proposals where view >= $1 AND view <= $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await?; + tx.execute( + query("DELETE FROM quorum_certificate where view >= $1 AND view <= $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await?; + + // Clean up leaves, but do not delete the most recent one (all leaves with a view number + // less than the given value). This is necessary to ensure that, in case of a restart, + // we can resume from the last decided leaf. + tx.execute( + query("DELETE FROM anchor_leaf WHERE view >= $1 AND view < $2") + .bind(from_view.u64() as i64) + .bind(to_view.u64() as i64), + ) + .await?; + + tx.commit().await?; + last_processed_view = Some(to_view.u64() as i64); + } + } + + #[tracing::instrument(skip(self))] + async fn prune(&self, cur_view: ViewNumber) -> anyhow::Result<()> { + let mut tx = self.db.write().await?; + + // Prune everything older than the target retention period. + prune_to_view( + &mut tx, + cur_view.u64().saturating_sub(self.gc_opt.target_retention), + ) + .await?; + + // Check our storage usage; if necessary we will prune more aggressively (up to the minimum + // retention) to get below the target usage. 
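+ // For the embedded SQLite build, the query assembled below evaluates to roughly
+ // `SELECT sum(pgsize) FROM dbstat WHERE name IN ('anchor_leaf', 'vid_share', 'da_proposal', 'quorum_proposals', 'quorum_certificate')`,
+ // while the Postgres variant sums `pg_table_size('<table>')` over the same PRUNE_TABLES list.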
+ #[cfg(feature = "embedded-db")] + let usage_query = format!( + "SELECT sum(pgsize) FROM dbstat WHERE name IN ({})", + PRUNE_TABLES + .iter() + .map(|table| format!("'{table}'")) + .join(",") + ); + + #[cfg(not(feature = "embedded-db"))] + let usage_query = { + let table_sizes = PRUNE_TABLES + .iter() + .map(|table| format!("pg_table_size('{table}')")) + .join(" + "); + format!("SELECT {table_sizes}") + }; + + let (usage,): (i64,) = query_as(&usage_query).fetch_one(tx.as_mut()).await?; + tracing::debug!(usage, "consensus storage usage after pruning"); + + if (usage as u64) > self.gc_opt.target_usage { + tracing::warn!( + usage, + gc_opt = ?self.gc_opt, + "consensus storage is running out of space, pruning to minimum retention" + ); + prune_to_view( + &mut tx, + cur_view.u64().saturating_sub(self.gc_opt.minimum_retention), + ) + .await?; + } + + tx.commit().await + } +} + +const PRUNE_TABLES: &[&str] = &[ + "anchor_leaf", + "vid_share", + "da_proposal", + "quorum_proposals", + "quorum_certificate", +]; + +async fn prune_to_view(tx: &mut Transaction, view: u64) -> anyhow::Result<()> { + if view == 0 { + // Nothing to prune, the entire chain is younger than the retention period. + return Ok(()); + } + tracing::debug!(view, "pruning consensus storage"); + + for table in PRUNE_TABLES { + let res = query(&format!("DELETE FROM {table} WHERE view < $1")) + .bind(view as i64) + .execute(tx.as_mut()) + .await + .context(format!("pruning {table}"))?; + if res.rows_affected() > 0 { + tracing::info!( + "garbage collected {} rows from {table}", + res.rows_affected() + ); + } + } + + Ok(()) } #[async_trait] @@ -575,13 +979,18 @@ impl SequencerPersistence for Persistence { // Generate an event for the new leaves and, only if it succeeds, clean up data we no longer // need. - let consumer = dyn_clone::clone(consumer); - - if let Err(err) = collect_garbage(self, view, consumer).await { + if let Err(err) = self.generate_decide_events(consumer).await { // GC/event processing failure is not an error, since by this point we have at least // managed to persist the decided leaves successfully, and GC will just run again at the // next decide. Log an error but do not return it. - tracing::warn!(?view, "GC/event processing failed: {err:#}"); + tracing::warn!(?view, "event processing failed: {err:#}"); + return Ok(()); + } + + // Garbage collect data which was not included in any decide event, but which at this point + // is old enough to just forget about. 
+ if let Err(err) = self.prune(view).await { + tracing::warn!(?view, "pruning failed: {err:#}"); } Ok(()) @@ -740,24 +1149,25 @@ impl SequencerPersistence for Persistence { &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let data = &proposal.data; - let view = data.view_number().u64(); + let view = proposal.data.view_number.u64(); + let payload_hash = proposal.data.payload_commitment; let data_bytes = bincode::serialize(proposal).unwrap(); let mut tx = self.db.write().await?; tx.upsert( "vid_share", - ["view", "data"], + ["view", "data", "payload_hash"], ["view"], - [(view as i64, data_bytes)], + [(view as i64, data_bytes, payload_hash.to_string())], ) .await?; tx.commit().await } + async fn append_da( &self, proposal: &Proposal>, - _vid_commit: ::Commit, + vid_commit: VidCommitment, ) -> anyhow::Result<()> { let data = &proposal.data; let view = data.view_number().u64(); @@ -766,13 +1176,14 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "da_proposal", - ["view", "data"], + ["view", "data", "payload_hash"], ["view"], - [(view as i64, data_bytes)], + [(view as i64, data_bytes, vid_commit.to_string())], ) .await?; tx.commit().await } + async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()> { // Todo Remove this after https://github.com/EspressoSystems/espresso-sequencer/issues/1931 if !matches!(action, HotShotAction::Propose | HotShotAction::Vote) { @@ -829,6 +1240,22 @@ impl SequencerPersistence for Persistence { [(view_number as i64, leaf_hash.to_string(), proposal_bytes)], ) .await?; + + // We also keep track of any QC we see in case we need it to recover our archival storage. + let justify_qc = &proposal.data.justify_qc; + let justify_qc_bytes = bincode::serialize(&justify_qc).context("serializing QC")?; + tx.upsert( + "quorum_certificate", + ["view", "leaf_hash", "data"], + ["view"], + [( + justify_qc.view_number.u64() as i64, + justify_qc.data.leaf_commit.to_string(), + &justify_qc_bytes, + )], + ) + .await?; + tx.commit().await } @@ -883,177 +1310,144 @@ impl SequencerPersistence for Persistence { } } -async fn collect_garbage( - storage: &Persistence, - view: ViewNumber, - consumer: impl EventConsumer, -) -> anyhow::Result<()> { - // In SQLite, overlapping read and write transactions can lead to database errors. - // To avoid this: - // - start a read transaction to query and collect all the necessary data. - // - Commit (or implicitly drop) the read transaction once the data is fetched. - // - use the collected data to generate a "decide" event for the consumer. - // - begin a write transaction to delete the data and update the event stream. - let mut tx = storage.db.read().await?; - - // collect VID shares. - let mut vid_shares = tx - .fetch_all(query("SELECT * FROM vid_share where view <= $1").bind(view.u64() as i64)) - .await? - .into_iter() - .map(|row| { - let view: i64 = row.get("view"); - let data: Vec = row.get("data"); - let vid_proposal = - bincode::deserialize::>>(&data)?; - Ok((view as u64, vid_proposal.data)) - }) - .collect::>>()?; - - // collect DA proposals. - let mut da_proposals = tx - .fetch_all(query("SELECT * FROM da_proposal where view <= $1").bind(view.u64() as i64)) - .await? 
- .into_iter() - .map(|row| { - let view: i64 = row.get("view"); - let data: Vec = row.get("data"); - let da_proposal = - bincode::deserialize::>>(&data)?; - Ok((view as u64, da_proposal.data)) - }) - .collect::>>()?; +#[async_trait] +impl Provider for Persistence { + #[tracing::instrument(skip(self))] + async fn fetch(&self, req: VidCommonRequest) -> Option { + let mut tx = match self.db.read().await { + Ok(tx) => tx, + Err(err) => { + tracing::warn!("could not open transaction: {err:#}"); + return None; + } + }; - // collect leaves - let mut leaves = tx - .fetch_all( - query("SELECT view, leaf, qc FROM anchor_leaf WHERE view <= $1") - .bind(view.u64() as i64), + let bytes = match query_as::<(Vec,)>( + "SELECT data FROM vid_share WHERE payload_hash = $1 LIMIT 1", ) - .await? - .into_iter() - .map(|row| { - let view: i64 = row.get("view"); - let leaf_data: Vec = row.get("leaf"); - let leaf = bincode::deserialize::(&leaf_data)?; - let qc_data: Vec = row.get("qc"); - let qc = bincode::deserialize::>(&qc_data)?; - Ok((view as u64, (leaf, qc))) - }) - .collect::>>()?; - - // Exclude from the decide event any leaves which have definitely already been processed. We may - // have selected an already-processed leaf because the oldest leaf -- the last leaf processed in - // the previous decide event -- remained in the database to serve as the anchor leaf, so our - // query would have returned it. In fact, this will almost always be the case, but there are two - // cases where it might not be, and we must process this leaf after all: - // - // 1. The oldest leaf is the genesis leaf, and there _is_ no previous decide event - // 2. We previously stored some leaves in the database and then failed while processing the - // decide event, or shut down before generating the decide event, and so we are only now - // generating the decide event for those previous leaves. - // - // Since these cases (particularly case 2) are hard to account for explicitly, we just use a - // persistent value in the database to remember how far we have successfully processed the event - // stream. - let last_processed_view: Option = tx - .fetch_optional(query( - "SELECT last_processed_view FROM event_stream WHERE id = 1 LIMIT 1", - )) - .await? - .map(|row| row.get("last_processed_view")); - let leaves = if let Some(v) = last_processed_view { - let new_leaves = leaves.split_off(&((v as u64) + 1)); - if !leaves.is_empty() { - tracing::debug!( - v, - remaining_leaves = new_leaves.len(), - ?leaves, - "excluding already-processed leaves from decide event" - ); - } - new_leaves - } else { - leaves - }; + .bind(req.0.to_string()) + .fetch_one(tx.as_mut()) + .await + { + Ok((bytes,)) => bytes, + Err(err) => { + tracing::warn!("error loading VID share: {err:#}"); + return None; + } + }; - drop(tx); + let share: Proposal> = + match bincode::deserialize(&bytes) { + Ok(share) => share, + Err(err) => { + tracing::warn!("error decoding VID share: {err:#}"); + return None; + } + }; - // Generate a decide event for each leaf, to be processed by the event consumer. We make a - // separate event for each leaf because it is possible we have non-consecutive leaves in our - // storage, which would not be valid as a single decide with a single leaf chain. - for (view, (mut leaf, qc)) in leaves { - // Include the VID share if available. 
- let vid_share = vid_shares.remove(&view); - if vid_share.is_none() { - tracing::debug!(view, "VID share not available at decide"); - } + Some(share.data.common) + } +} - // Fill in the full block payload using the DA proposals we had persisted. - if let Some(proposal) = da_proposals.remove(&view) { - let payload = Payload::from_bytes(&proposal.encoded_transactions, &proposal.metadata); - leaf.fill_block_payload_unchecked(payload); - } else if view == ViewNumber::genesis().u64() { - // We don't get a DA proposal for the genesis view, but we know what the payload always - // is. - leaf.fill_block_payload_unchecked(Payload::empty().0); - } else { - tracing::debug!(view, "DA proposal not available at decide"); - } +#[async_trait] +impl Provider for Persistence { + #[tracing::instrument(skip(self))] + async fn fetch(&self, req: PayloadRequest) -> Option { + let mut tx = match self.db.read().await { + Ok(tx) => tx, + Err(err) => { + tracing::warn!("could not open transaction: {err:#}"); + return None; + } + }; - let leaf_info = LeafInfo { - leaf: leaf.into(), - vid_share, + let bytes = match query_as::<(Vec,)>( + "SELECT data FROM da_proposal WHERE payload_hash = $1 LIMIT 1", + ) + .bind(req.0.to_string()) + .fetch_one(tx.as_mut()) + .await + { + Ok((bytes,)) => bytes, + Err(err) => { + tracing::warn!("error loading DA proposal: {err:#}"); + return None; + } + }; - // Note: the following fields are not used in Decide event processing, and - // should be removed. For now, we just default them. - state: Default::default(), - delta: Default::default(), + let proposal: Proposal> = match bincode::deserialize(&bytes) + { + Ok(proposal) => proposal, + Err(err) => { + tracing::warn!("error decoding DA proposal: {err:#}"); + return None; + } }; - tracing::debug!(?view, ?qc, ?leaf_info, "generating decide event"); - consumer - .handle_event(&Event { - view_number: ViewNumber::new(view), - event: EventType::Decide { - leaf_chain: Arc::new(vec![leaf_info]), - qc: Arc::new(qc.to_qc2()), - block_size: None, - }, - }) - .await?; + + Some(Payload::from_bytes( + &proposal.data.encoded_transactions, + &proposal.data.metadata, + )) } +} - let mut tx = storage.db.write().await?; - // Now that we have definitely processed leaves up to `view`, we can update - // `last_processed_view` so we don't process these leaves again. We may still fail at this - // point, or shut down, and fail to complete this update. At worst this will lead to us sending - // a duplicate decide event the next time we are called; this is fine as the event consumer is - // required to be idempotent. - tx.upsert( - "event_stream", - ["id", "last_processed_view"], - ["id"], - [(1i32, view.u64() as i64)], - ) - .await?; - - tx.execute(query("DELETE FROM vid_share where view <= $1").bind(view.u64() as i64)) - .await?; +#[async_trait] +impl Provider> for Persistence { + #[tracing::instrument(skip(self))] + async fn fetch(&self, req: LeafRequest) -> Option> { + let mut tx = match self.db.read().await { + Ok(tx) => tx, + Err(err) => { + tracing::warn!("could not open transaction: {err:#}"); + return None; + } + }; - tx.execute(query("DELETE FROM da_proposal where view <= $1").bind(view.u64() as i64)) - .await?; + let (leaf, qc) = match fetch_leaf_from_proposals(&mut tx, req).await { + Ok(res) => res, + Err(err) => { + tracing::info!("requested leaf not found in undecided proposals: {err:#}"); + return None; + } + }; - // Clean up leaves, but do not delete the most recent one (all leaves with a view number less than the given value). 
- // This is necessary to ensure that, in case of a restart, we can resume from the last decided leaf. - tx.execute(query("DELETE FROM anchor_leaf WHERE view < $1").bind(view.u64() as i64)) - .await?; + match LeafQueryData::new(leaf, qc) { + Ok(leaf) => Some(leaf), + Err(err) => { + tracing::warn!("fetched invalid leaf: {err:#}"); + None + } + } + } +} - // Clean up old proposals. These are not part of the decide event we generate for the consumer, - // so we don't need to return them. - tx.execute(query("DELETE FROM quorum_proposals where view <= $1").bind(view.u64() as i64)) - .await?; +async fn fetch_leaf_from_proposals( + tx: &mut Transaction, + req: LeafRequest, +) -> anyhow::Result<(Leaf, QuorumCertificate)> { + // Look for a quorum proposal corresponding to this leaf. + let (proposal_bytes,) = + query_as::<(Vec,)>("SELECT data FROM quorum_proposals WHERE leaf_hash = $1 LIMIT 1") + .bind(req.expected_leaf.to_string()) + .fetch_one(tx.as_mut()) + .await + .context("fetching proposal")?; - tx.commit().await + // Look for a QC corresponding to this leaf. + let (qc_bytes,) = + query_as::<(Vec,)>("SELECT data FROM quorum_certificate WHERE leaf_hash = $1 LIMIT 1") + .bind(req.expected_leaf.to_string()) + .fetch_one(tx.as_mut()) + .await + .context("fetching QC")?; + + let proposal: Proposal> = + bincode::deserialize(&proposal_bytes).context("deserializing quorum proposal")?; + let qc: QuorumCertificate = + bincode::deserialize(&qc_bytes).context("deserializing quorum certificate")?; + + let leaf = Leaf::from_quorum_proposal(&proposal.data); + Ok((leaf, qc)) } #[cfg(test)] @@ -1070,24 +1464,26 @@ mod testing { Arc::new(TmpDb::init().await) } - async fn connect(db: &Self::Storage) -> Self { + #[allow(refining_impl_trait)] + fn options(db: &Self::Storage) -> Options { #[cfg(not(feature = "embedded-db"))] { - let mut opt: Options = PostgresOptions { + PostgresOptions { port: Some(db.port()), host: Some(db.host()), user: Some("postgres".into()), password: Some("password".into()), ..Default::default() } - .into(); - opt.create().await.unwrap() + .into() } #[cfg(feature = "embedded-db")] { - let mut opt: Options = SqliteOptions { path: db.path() }.into(); - opt.create().await.unwrap() + SqliteOptions { + path: Some(db.path()), + } + .into() } } } @@ -1108,17 +1504,22 @@ mod test { use super::*; use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; - use espresso_types::{Leaf, NodeState, ValidatedState}; + use espresso_types::{traits::NullEventConsumer, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, simple_certificate::QuorumCertificate, - traits::signature_key::SignatureKey, + traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, + vid::vid_scheme, }; + use jf_vid::VidScheme; + use sequencer_utils::test_utils::setup_test; #[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposals_leaf_hash_migration() { + setup_test(); + // Create some quorum proposals to test with. let leaf: Leaf2 = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()) .await @@ -1192,4 +1593,256 @@ mod test { ); } } + + #[tokio::test(flavor = "multi_thread")] + async fn test_fetching_providers() { + setup_test(); + + let tmp = Persistence::tmp_storage().await; + let storage = Persistence::connect(&tmp).await; + + // Mock up some data. 
+ let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf_payload = leaf.block_payload().unwrap(); + let leaf_payload_bytes_arc = leaf_payload.encode(); + let disperse = vid_scheme(2) + .disperse(leaf_payload_bytes_arc.clone()) + .unwrap(); + let payload_commitment = disperse.commit; + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); + let vid_share = VidDisperseShare:: { + view_number: ViewNumber::new(0), + payload_commitment, + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + } + .to_proposal(&privkey) + .unwrap() + .clone(); + + let quorum_proposal = QuorumProposal2:: { + block_header: leaf.block_header().clone(), + view_number: leaf.view_number(), + justify_qc: leaf.justify_qc().to_qc2(), + upgrade_certificate: None, + view_change_evidence: None, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }; + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + let quorum_proposal = Proposal { + data: quorum_proposal, + signature: quorum_proposal_signature, + _pd: Default::default(), + }; + + let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) + .expect("Failed to sign block payload"); + let da_proposal = Proposal { + data: DaProposal:: { + encoded_transactions: leaf_payload_bytes_arc, + metadata: leaf_payload.ns_table().clone(), + view_number: ViewNumber::new(0), + }, + signature: block_payload_signature, + _pd: Default::default(), + }; + + let mut next_quorum_proposal = quorum_proposal.clone(); + next_quorum_proposal.data.view_number += 1; + next_quorum_proposal.data.justify_qc.view_number += 1; + next_quorum_proposal.data.justify_qc.data.leaf_commit = + Committable::commit(&leaf.clone().into()); + let qc = &next_quorum_proposal.data.justify_qc; + + // Add to database. + storage + .append_da(&da_proposal, payload_commitment) + .await + .unwrap(); + storage.append_vid(&vid_share).await.unwrap(); + storage + .append_quorum_proposal(&quorum_proposal) + .await + .unwrap(); + + // Add an extra quorum proposal so we have a QC pointing back at `leaf`. + storage + .append_quorum_proposal(&next_quorum_proposal) + .await + .unwrap(); + + // Fetch it as if we were rebuilding an archive. + assert_eq!( + vid_share.data.common, + storage + .fetch(VidCommonRequest(vid_share.data.payload_commitment)) + .await + .unwrap() + ); + assert_eq!( + leaf_payload, + storage + .fetch(PayloadRequest(vid_share.data.payload_commitment)) + .await + .unwrap() + ); + assert_eq!( + LeafQueryData::new(leaf.clone(), qc.clone().to_qc()).unwrap(), + storage + .fetch(LeafRequest::new( + leaf.block_header().block_number(), + Committable::commit(&leaf), + qc.clone().to_qc().commit() + )) + .await + .unwrap() + ); + } + + /// Test conditions that trigger pruning. + /// + /// This is a configurable test that can be used to test different configurations of GC, + /// `pruning_opt`. The test populates the database with some data for view 1, asserts that it is + /// retained for view 2, and then asserts that it is pruned by view 3. There are various + /// different configurations that can achieve this behavior, such that the data is retained and + /// then pruned due to different logic and code paths. 
+ async fn test_pruning_helper(pruning_opt: ConsensusPruningOptions) { + setup_test(); + + let tmp = Persistence::tmp_storage().await; + let mut opt = Persistence::options(&tmp); + opt.consensus_pruning = pruning_opt; + let storage = opt.create().await.unwrap(); + + let data_view = ViewNumber::new(1); + + // Populate some data. + let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf_payload = leaf.block_payload().unwrap(); + let leaf_payload_bytes_arc = leaf_payload.encode(); + + let disperse = vid_scheme(2) + .disperse(leaf_payload_bytes_arc.clone()) + .unwrap(); + let payload_commitment = vid_commitment(&leaf_payload_bytes_arc, 2); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); + let vid = VidDisperseShare:: { + view_number: data_view, + payload_commitment, + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + } + .to_proposal(&privkey) + .unwrap() + .clone(); + let quorum_proposal = QuorumProposal2:: { + block_header: leaf.block_header().clone(), + view_number: data_view, + justify_qc: QuorumCertificate::genesis::( + &ValidatedState::default(), + &NodeState::mock(), + ) + .await + .to_qc2(), + upgrade_certificate: None, + view_change_evidence: None, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }; + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + let quorum_proposal = Proposal { + data: quorum_proposal, + signature: quorum_proposal_signature, + _pd: Default::default(), + }; + + let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) + .expect("Failed to sign block payload"); + let da_proposal = Proposal { + data: DaProposal:: { + encoded_transactions: leaf_payload_bytes_arc.clone(), + metadata: leaf_payload.ns_table().clone(), + view_number: data_view, + }, + signature: block_payload_signature, + _pd: Default::default(), + }; + + tracing::info!(?vid, ?da_proposal, ?quorum_proposal, "append data"); + storage.append_vid(&vid).await.unwrap(); + storage + .append_da(&da_proposal, payload_commitment) + .await + .unwrap(); + storage + .append_quorum_proposal(&quorum_proposal) + .await + .unwrap(); + + // The first decide doesn't trigger any garbage collection, even though our usage exceeds + // the target, because of the minimum retention. + tracing::info!("decide view 1"); + storage + .append_decided_leaves(data_view + 1, [], &NullEventConsumer) + .await + .unwrap(); + assert_eq!( + storage.load_vid_share(data_view).await.unwrap().unwrap(), + vid + ); + assert_eq!( + storage.load_da_proposal(data_view).await.unwrap().unwrap(), + da_proposal + ); + assert_eq!( + storage.load_quorum_proposal(data_view).await.unwrap(), + quorum_proposal + ); + + // After another view, our data is beyond the minimum retention (though not the target + // retention) so it gets pruned. 
+ tracing::info!("decide view 2"); + storage + .append_decided_leaves(data_view + 2, [], &NullEventConsumer) + .await + .unwrap(); + assert!(storage.load_vid_share(data_view).await.unwrap().is_none(),); + assert!(storage.load_da_proposal(data_view).await.unwrap().is_none()); + storage.load_quorum_proposal(data_view).await.unwrap_err(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_pruning_minimum_retention() { + test_pruning_helper(ConsensusPruningOptions { + // Use a very low target usage, to show that we still retain data up to the minimum + // retention even when usage is above target. + target_usage: 0, + minimum_retention: 1, + // Use a very high target retention, so that pruning is only triggered by the minimum + // retention. + target_retention: u64::MAX, + }) + .await + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_pruning_target_retention() { + test_pruning_helper(ConsensusPruningOptions { + target_retention: 1, + // Use a very low minimum retention, so that data is only kept around due to the target + // retention. + minimum_retention: 0, + // Use a very high target usage, so that pruning is only triggered by the target + // retention. + target_usage: u64::MAX, + }) + .await + } } diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs new file mode 100644 index 0000000000..5c753ece80 --- /dev/null +++ b/sequencer/src/proposal_fetcher.rs @@ -0,0 +1,244 @@ +use std::sync::Arc; + +use anyhow::Context; +use async_channel::{Receiver, Sender}; +use async_lock::RwLock; +use clap::Parser; +use committable::Commitment; +use derivative::Derivative; +use espresso_types::{parse_duration, v0::traits::SequencerPersistence, PubKey, ValidatedState}; +use futures::stream::StreamExt; +use hotshot::types::EventType; +use hotshot_types::{ + data::{EpochNumber, Leaf2, ViewNumber}, + traits::{ + metrics::{Counter, Gauge, Metrics}, + network::ConnectedNetwork, + node_implementation::{ConsensusTime, Versions}, + ValidatedState as _, + }, + utils::{View, ViewInner}, +}; +use std::time::Duration; +use tokio::time::{sleep, timeout}; +use tracing::Instrument; + +use crate::{ + context::{Consensus, TaskList}, + SeqTypes, +}; + +#[derive(Clone, Copy, Debug, Parser)] +pub struct ProposalFetcherConfig { + #[clap( + long = "proposal-fetcher-num-workers", + env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_NUM_WORKERS", + default_value = "2" + )] + pub num_workers: usize, + + #[clap( + long = "proposal-fetcher-fetch-timeout", + env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_FETCH_TIMEOUT", + default_value = "2s", + value_parser = parse_duration, + )] + pub fetch_timeout: Duration, +} + +impl Default for ProposalFetcherConfig { + fn default() -> Self { + Self::parse_from(std::iter::empty::()) + } +} + +impl ProposalFetcherConfig { + pub(crate) fn spawn( + self, + tasks: &mut TaskList, + consensus: Arc>>, + persistence: Arc
<P>
, + metrics: &(impl Metrics + ?Sized), + ) where + N: ConnectedNetwork, + P: SequencerPersistence, + V: Versions, + { + let (sender, receiver) = async_channel::unbounded(); + let fetcher = ProposalFetcher { + sender, + consensus, + persistence, + cfg: self, + metrics: ProposalFetcherMetrics::new(metrics), + }; + + tasks.spawn("proposal scanner", fetcher.clone().scan()); + for i in 0..self.num_workers { + tasks.spawn( + format!("proposal fetcher {i}"), + fetcher.clone().fetch(receiver.clone()), + ); + } + } +} + +#[derive(Clone, Debug)] +struct ProposalFetcherMetrics { + fetched: Arc, + failed: Arc, + queue_len: Arc, + last_seen: Arc, + last_fetched: Arc, +} + +impl ProposalFetcherMetrics { + fn new(metrics: &(impl Metrics + ?Sized)) -> Self { + let metrics = metrics.subgroup("proposal_fetcher".into()); + Self { + fetched: metrics.create_counter("fetched".into(), None).into(), + failed: metrics.create_counter("failed".into(), None).into(), + queue_len: metrics.create_gauge("queue_len".into(), None).into(), + last_seen: metrics + .create_gauge("last_seen".into(), Some("view".into())) + .into(), + last_fetched: metrics + .create_gauge("last_fetched".into(), Some("view".into())) + .into(), + } + } +} + +type Request = (ViewNumber, Commitment>); + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +struct ProposalFetcher +where + N: ConnectedNetwork, + P: SequencerPersistence, + V: Versions, +{ + sender: Sender, + #[derivative(Debug = "ignore")] + consensus: Arc>>, + #[derivative(Debug = "ignore")] + persistence: Arc
<P>
, + cfg: ProposalFetcherConfig, + metrics: ProposalFetcherMetrics, +} + +impl ProposalFetcher +where + N: ConnectedNetwork, + P: SequencerPersistence, + V: Versions, +{ + #[tracing::instrument(skip_all)] + async fn scan(self) { + let mut events = self.consensus.read().await.event_stream(); + while let Some(event) = events.next().await { + let EventType::QuorumProposal { proposal, .. } = event.event else { + continue; + }; + // Whenever we see a quorum proposal, ensure we have the chain of proposals stretching back + // to the anchor. This allows state replay from the decided state. + let parent_view = proposal.data.justify_qc.view_number; + let parent_leaf = proposal.data.justify_qc.data.leaf_commit; + self.request((parent_view, parent_leaf)).await; + } + } + + #[tracing::instrument(skip_all)] + async fn fetch(self, receiver: Receiver<(ViewNumber, Commitment>)>) { + let mut receiver = std::pin::pin!(receiver); + while let Some(req) = receiver.next().await { + self.fetch_request(req).await; + } + } + + async fn request(&self, req: Request) { + self.sender.send(req).await.ok(); + self.metrics.queue_len.set(self.sender.len()); + self.metrics.last_seen.set(req.0.u64() as usize); + } + + async fn fetch_request(&self, (view, leaf): Request) { + let span = tracing::warn_span!("fetch proposal", ?view, %leaf); + let res: anyhow::Result<()> = async { + let anchor_view = self + .persistence + .load_anchor_view() + .await + .context("loading anchor view")?; + if view <= anchor_view { + tracing::debug!(?anchor_view, "skipping already-decided proposal"); + return Ok(()); + } + + match self.persistence.load_quorum_proposal(view).await { + Ok(proposal) => { + // If we already have the proposal in storage, keep traversing the chain to its + // parent. + let view = proposal.data.justify_qc.view_number; + let leaf = proposal.data.justify_qc.data.leaf_commit; + self.request((view, leaf)).await; + return Ok(()); + } + Err(err) => { + tracing::info!("proposal missing from storage; fetching from network: {err:#}"); + } + } + + let future = + self.consensus + .read() + .await + .request_proposal(view, EpochNumber::genesis(), leaf)?; + let proposal = timeout(self.cfg.fetch_timeout, future) + .await + .context("timed out fetching proposal")? + .context("error fetching proposal")?; + self.persistence + .append_quorum_proposal(&proposal) + .await + .context("error saving fetched proposal")?; + + // Add the fetched leaf to HotShot state, so consensus can make use of it. + let leaf = Leaf2::from_quorum_proposal(&proposal.data); + let handle = self.consensus.read().await; + let consensus = handle.consensus(); + let mut consensus = consensus.write().await; + if matches!( + consensus.validated_state_map().get(&view), + None | Some(View { + // Replace a Da-only view with a Leaf view, which has strictly more information. + view_inner: ViewInner::Da { .. } + }) + ) { + let state = Arc::new(ValidatedState::from_header(leaf.block_header())); + if let Err(err) = consensus.update_leaf(leaf, state, None) { + tracing::warn!("unable to update leaf: {err:#}"); + } + } + + self.metrics.last_fetched.set(view.u64() as usize); + self.metrics.fetched.add(1); + + Ok(()) + } + .instrument(span) + .await; + if let Err(err) = res { + tracing::warn!("failed to fetch proposal: {err:#}"); + self.metrics.failed.add(1); + + // Avoid busy loop when operations are failing. + sleep(Duration::from_secs(1)).await; + + // If we fail fetching the proposal, don't let it clog up the fetching task. 
Just push + // it back onto the queue and move onto the next proposal. + self.request((view, leaf)).await; + } + } +} diff --git a/types/Cargo.toml b/types/Cargo.toml index 0c4151f691..32d0c8e389 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -21,15 +21,17 @@ cld = { workspace = true } committable = { workspace = true } contract-bindings = { path = "../contract-bindings" } derive_more = { workspace = true } -dyn-clone = { workspace = true } +diff-test-bn254 = { git = "https://github.com/EspressoSystems/solidity-bn254.git" } ethers = { workspace = true } fluent-asserter = "0.1.9" futures = { workspace = true } hotshot = { workspace = true } +hotshot-contract-adapter = { workspace = true } hotshot-query-service = { workspace = true } hotshot-types = { workspace = true } itertools = { workspace = true } jf-merkle-tree = { workspace = true } +jf-signature = { workspace = true } jf-utils = { workspace = true } # TODO temporary: used only for test_rng() jf-vid = { workspace = true } lru = { workspace = true } diff --git a/types/src/v0/impls/auction.rs b/types/src/v0/impls/auction.rs index ce5aa835d6..71b5a5592d 100644 --- a/types/src/v0/impls/auction.rs +++ b/types/src/v0/impls/auction.rs @@ -210,7 +210,7 @@ impl BidTx { .charge_fee(FeeInfo::new(self.account(), self.amount()), recipient) .map_err(ExecutionError::from)?; - // Charge the the gas amount + // Charge the gas amount state .charge_fee(FeeInfo::new(self.account(), self.gas_price()), recipient) .map_err(ExecutionError::from)?; diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 1577a2a38d..f410858ea9 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -673,7 +673,7 @@ impl Header { &mut *field_mut!(self.l1_head) } - /// The Espresso block header includes information a bout the latest finalized L1 block. + /// The Espresso block header includes information about the latest finalized L1 block. /// /// Similar to [`l1_head`](Self::l1_head), rollups can use this information to implement a /// bridge between the L1 and L2 while retaining the finality of low-latency block confirmations diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 71a387bbcb..bf5e5595fa 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -213,6 +213,7 @@ pub mod mock { impl StateCatchup for MockStateCatchup { async fn try_fetch_accounts( &self, + _retry: usize, _instance: &NodeState, _height: u64, view: ViewNumber, @@ -228,6 +229,7 @@ pub mod mock { async fn try_remember_blocks_merkle_tree( &self, + _retry: usize, _instance: &NodeState, _height: u64, view: ViewNumber, @@ -252,6 +254,7 @@ pub mod mock { async fn try_fetch_chain_config( &self, + _retry: usize, _commitment: Commitment, ) -> anyhow::Result { Ok(ChainConfig::default()) diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 6bd58a837c..17b33ad7a4 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -5,7 +5,6 @@ use std::{cmp::max, collections::BTreeMap, fmt::Debug, ops::Range, sync::Arc}; use anyhow::{bail, ensure, Context}; use async_trait::async_trait; use committable::{Commitment, Committable}; -use dyn_clone::DynClone; use futures::{FutureExt, TryFutureExt}; use hotshot::{types::EventType, HotShotInitializer}; use hotshot_types::{ @@ -41,6 +40,7 @@ pub trait StateCatchup: Send + Sync { /// Try to fetch the given accounts state, failing without retrying if unable. 
async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -58,10 +58,18 @@ pub trait StateCatchup: Send + Sync { accounts: Vec, ) -> anyhow::Result> { self.backoff() - .retry(self, |provider| { - async { + .retry(self, |provider, retry| { + let accounts = &accounts; + async move { let tree = provider - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, &accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await .map_err(|err| { err.context(format!( @@ -85,6 +93,7 @@ pub trait StateCatchup: Send + Sync { /// Try to fetch and remember the blocks frontier, failing without retrying if unable. async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -100,8 +109,8 @@ pub trait StateCatchup: Send + Sync { mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { self.backoff() - .retry(mt, |mt| { - self.try_remember_blocks_merkle_tree(instance, height, view, mt) + .retry(mt, |mt, retry| { + self.try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .map_err(|err| err.context("fetching frontier")) .boxed() }) @@ -110,6 +119,7 @@ pub trait StateCatchup: Send + Sync { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result; @@ -118,9 +128,9 @@ pub trait StateCatchup: Send + Sync { commitment: Commitment, ) -> anyhow::Result { self.backoff() - .retry(self, |provider| { + .retry(self, |provider, retry| { provider - .try_fetch_chain_config(commitment) + .try_fetch_chain_config(retry, commitment) .map_err(|err| err.context("fetching chain config")) .boxed() }) @@ -135,6 +145,7 @@ pub trait StateCatchup: Send + Sync { impl StateCatchup for Box { async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -142,7 +153,14 @@ impl StateCatchup for Box { accounts: &[FeeAccount], ) -> anyhow::Result { (**self) - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await } @@ -161,13 +179,14 @@ impl StateCatchup for Box { async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { (**self) - .try_remember_blocks_merkle_tree(instance, height, view, mt) + .try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .await } @@ -185,9 +204,10 @@ impl StateCatchup for Box { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { - (**self).try_fetch_chain_config(commitment).await + (**self).try_fetch_chain_config(retry, commitment).await } async fn fetch_chain_config( @@ -210,6 +230,7 @@ impl StateCatchup for Box { impl StateCatchup for Arc { async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -217,7 +238,14 @@ impl StateCatchup for Arc { accounts: &[FeeAccount], ) -> anyhow::Result { (**self) - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await } @@ -236,13 +264,14 @@ impl StateCatchup for Arc { async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { 
(**self) - .try_remember_blocks_merkle_tree(instance, height, view, mt) + .try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .await } @@ -260,9 +289,10 @@ impl StateCatchup for Arc { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { - (**self).try_fetch_chain_config(commitment).await + (**self).try_fetch_chain_config(retry, commitment).await } async fn fetch_chain_config( @@ -287,6 +317,7 @@ impl StateCatchup for Vec { #[tracing::instrument(skip(self, instance))] async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -295,7 +326,14 @@ impl StateCatchup for Vec { ) -> anyhow::Result { for provider in self { match provider - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await { Ok(tree) => return Ok(tree), @@ -315,6 +353,7 @@ impl StateCatchup for Vec { #[tracing::instrument(skip(self, instance, mt))] async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -322,7 +361,7 @@ impl StateCatchup for Vec { ) -> anyhow::Result<()> { for provider in self { match provider - .try_remember_blocks_merkle_tree(instance, height, view, mt) + .try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .await { Ok(()) => return Ok(()), @@ -340,10 +379,11 @@ impl StateCatchup for Vec { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { for provider in self { - match provider.try_fetch_chain_config(commitment).await { + match provider.try_fetch_chain_config(retry, commitment).await { Ok(cf) => return Ok(cf), Err(err) => { tracing::info!( @@ -374,6 +414,7 @@ impl StateCatchup for Vec { pub trait PersistenceOptions: Clone + Send + Sync + 'static { type Persistence: SequencerPersistence; + fn set_view_retention(&mut self, view_retention: u64); async fn create(&mut self) -> anyhow::Result; async fn reset(self) -> anyhow::Result<()>; } @@ -652,16 +693,13 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { } #[async_trait] -pub trait EventConsumer: Debug + DynClone + Send + Sync { +pub trait EventConsumer: Debug + Send + Sync { async fn handle_event(&self, event: &Event) -> anyhow::Result<()>; } -dyn_clone::clone_trait_object!(EventConsumer); - #[async_trait] impl EventConsumer for Box where - Self: Clone, T: EventConsumer + ?Sized, { async fn handle_event(&self, event: &Event) -> anyhow::Result<()> { diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index 3f4cfd2750..0c3df95798 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -286,12 +286,12 @@ impl BackoffParams { pub async fn retry( &self, mut state: S, - f: impl for<'a> Fn(&'a mut S) -> BoxFuture<'a, anyhow::Result>, + f: impl for<'a> Fn(&'a mut S, usize) -> BoxFuture<'a, anyhow::Result>, ) -> anyhow::Result { let mut delay = self.base; - loop { - match f(&mut state).await { - Ok(res) => break Ok(res), + for i in 0.. 
+            match f(&mut state, i).await {
+                Ok(res) => return Ok(res),
                 Err(err) if self.disable => {
                     return Err(err.context("Retryable operation failed; retries disabled"));
                 }
@@ -304,6 +304,7 @@ impl BackoffParams {
                 }
             }
         }
+        unreachable!()
     }
 
     #[must_use]
diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs
index f99c840cac..1a2e7efc9b 100644
--- a/types/src/v0/v0_3/mod.rs
+++ b/types/src/v0/v0_3/mod.rs
@@ -18,3 +18,7 @@ pub(crate) use super::v0_1::{
 };
 
 pub const VERSION: Version = Version { major: 0, minor: 3 };
+
+mod stake_table;
+
+pub use stake_table::CombinedStakeTable;
diff --git a/types/src/v0/v0_3/stake_table.rs b/types/src/v0/v0_3/stake_table.rs
new file mode 100644
index 0000000000..6d5e41f189
--- /dev/null
+++ b/types/src/v0/v0_3/stake_table.rs
@@ -0,0 +1,12 @@
+use crate::PubKey;
+use derive_more::derive::From;
+use hotshot_contract_adapter::stake_table::NodeInfoJf;
+use hotshot_types::network::PeerConfigKeys;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize, From)]
+pub struct PermissionedStakeTableEntry(NodeInfoJf);
+
+/// Stake table holding all staking information (DA and non-DA stakers)
+#[derive(Debug, Clone, Serialize, Deserialize, From)]
+pub struct CombinedStakeTable(Vec<PeerConfigKeys<PubKey>>);
diff --git a/utils/Cargo.toml b/utils/Cargo.toml
index 5328ae0d71..27aa0f435a 100644
--- a/utils/Cargo.toml
+++ b/utils/Cargo.toml
@@ -20,6 +20,7 @@ ethers = { workspace = true }
 futures = { workspace = true }
 hotshot = { workspace = true }
 hotshot-contract-adapter = { workspace = true }
+hotshot-types = { workspace = true }
 log-panics = { workspace = true }
 portpicker = { workspace = true }
 # for price oracle and align with ethers-rs dep
@@ -29,5 +30,6 @@ serde_json = "^1.0.113"
 surf = "2.3.2"
 tempfile = { workspace = true }
 tokio = { workspace = true }
+toml = { workspace = true }
 tracing = "0.1.37"
 url = "2.3.1"
diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs
index ce95f8ba21..50826c042d 100644
--- a/utils/src/deployer.rs
+++ b/utils/src/deployer.rs
@@ -7,6 +7,7 @@ use contract_bindings::{
     light_client_mock::LIGHTCLIENTMOCK_ABI,
     light_client_state_update_vk::LightClientStateUpdateVK,
     light_client_state_update_vk_mock::LightClientStateUpdateVKMock,
+    permissioned_stake_table::{NodeInfo, PermissionedStakeTable},
     plonk_verifier::PlonkVerifier,
 };
 use derive_more::Display;
@@ -18,8 +19,7 @@ use futures::future::{BoxFuture, FutureExt};
 use hotshot_contract_adapter::light_client::{
     LightClientConstructorArgs, ParsedLightClientState, ParsedStakeTableState,
 };
-use std::sync::Arc;
-use std::{collections::HashMap, io::Write, ops::Deref};
+use std::{collections::HashMap, io::Write, ops::Deref, sync::Arc, time::Duration};
 use url::Url;
 
 /// Set of predeployed contracts.
@@ -48,6 +48,10 @@ pub struct DeployedContracts {
     /// Use an already-deployed FeeContract.sol proxy instead of deploying a new one.
     #[clap(long, env = Contract::FeeContractProxy)]
     fee_contract_proxy: Option<Address>,
+
+    /// Use an already-deployed PermissionedStakeTable.sol proxy instead of deploying a new one.
+    #[clap(long, env = Contract::PermissonedStakeTable)]
+    permissioned_stake_table: Option<Address>,
 }
 
 /// An identifier for a particular contract.
@@ -65,6 +69,8 @@ pub enum Contract {
     FeeContract,
     #[display("ESPRESSO_SEQUENCER_FEE_CONTRACT_PROXY_ADDRESS")]
     FeeContractProxy,
+    #[display("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")]
+    PermissonedStakeTable,
 }
 
 impl From<Contract> for OsStr {
@@ -98,6 +104,9 @@ impl From<DeployedContracts> for Contracts {
         if let Some(addr) = deployed.fee_contract_proxy {
             m.insert(Contract::FeeContractProxy, addr);
         }
+        if let Some(addr) = deployed.permissioned_stake_table {
+            m.insert(Contract::PermissonedStakeTable, addr);
+        }
         Self(m)
     }
 }
@@ -300,6 +309,7 @@ pub async fn deploy_mock_light_client_contract(
 #[allow(clippy::too_many_arguments)]
 pub async fn deploy(
     l1url: Url,
+    l1_interval: Duration,
     mnemonic: String,
     account_index: u32,
     multisig_address: Option<Address>,
@@ -308,8 +318,9 @@ pub async fn deploy(
     genesis: BoxFuture<'_, anyhow::Result<(ParsedLightClientState, ParsedStakeTableState)>>,
     permissioned_prover: Option<Address>,
     mut contracts: Contracts,
+    initial_stake_table: Option<Vec<NodeInfo>>,
 ) -> anyhow::Result<Contracts> {
-    let provider = Provider::<Http>::try_from(l1url.to_string())?;
+    let provider = Provider::<Http>::try_from(l1url.to_string())?.interval(l1_interval);
     let chain_id = provider.get_chainid().await?.as_u64();
     let wallet = MnemonicBuilder::<English>::default()
         .phrase(mnemonic.as_str())
@@ -426,6 +437,28 @@ pub async fn deploy(
         }
     }
 
+    // `PermissionedStakeTable.sol`
+    if should_deploy(ContractGroup::PermissionedStakeTable, &only) {
+        let initial_stake_table: Vec<_> = initial_stake_table.unwrap_or_default();
+        let stake_table_address = contracts
+            .deploy_tx(
+                Contract::PermissonedStakeTable,
+                PermissionedStakeTable::deploy(l1.clone(), initial_stake_table)?,
+            )
+            .await?;
+        let stake_table = PermissionedStakeTable::new(stake_table_address, l1.clone());
+
+        // Transfer ownership to the multisig wallet if provided.
+        if let Some(owner) = multisig_address {
+            tracing::info!(
+                %stake_table_address,
+                %owner,
+                "transferring PermissionedStakeTable ownership to multisig",
+            );
+            stake_table.transfer_ownership(owner).send().await?.await?;
+        }
+    }
+
     Ok(contracts)
 }
 
@@ -459,6 +492,7 @@ pub async fn is_proxy_contract(
 pub enum ContractGroup {
     FeeContract,
     LightClient,
+    PermissionedStakeTable,
 }
 
 #[cfg(any(test, feature = "testing"))]
diff --git a/utils/src/lib.rs b/utils/src/lib.rs
index ec8bce5825..18c01a06d2 100644
--- a/utils/src/lib.rs
+++ b/utils/src/lib.rs
@@ -24,6 +24,7 @@ pub mod blocknative;
 pub mod deployer;
 pub mod logging;
 pub mod ser;
+pub mod stake_table;
 pub mod test_utils;
 
 pub type Signer = SignerMiddleware<Provider<Http>, LocalWallet>;
diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs
new file mode 100644
index 0000000000..0b3236083b
--- /dev/null
+++ b/utils/src/stake_table.rs
@@ -0,0 +1,138 @@
+/// Utilities for loading an initial permissioned stake table from a toml file.
+///
+/// The initial stake table is passed to the permissioned stake table contract
+/// on deployment.
+use contract_bindings::permissioned_stake_table::NodeInfo;
+use hotshot::types::BLSPubKey;
+use hotshot_contract_adapter::stake_table::NodeInfoJf;
+use hotshot_types::network::PeerConfigKeys;
+
+use std::{fs, path::Path};
+
+/// A stake table config stored in a file
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
+#[serde(bound(deserialize = ""))]
+pub struct PermissionedStakeTableConfig {
+    /// The list of public keys that are initially inserted into the
+    /// permissioned stake table contract.
+    #[serde(default)]
+    pub public_keys: Vec<PeerConfigKeys<BLSPubKey>>,
+}
+
+impl PermissionedStakeTableConfig {
+    pub fn from_toml_file(path: &Path) -> anyhow::Result<Self> {
+        let config_file_as_string: String = fs::read_to_string(path)
+            .unwrap_or_else(|_| panic!("Could not read config file located at {}", path.display()));
+
+        Ok(
+            toml::from_str::<Self>(&config_file_as_string).unwrap_or_else(|err| {
+                panic!(
+                    "Unable to convert config file {} to TOML: {err}",
+                    path.display()
+                )
+            }),
+        )
+    }
+}
+
+impl From<PermissionedStakeTableConfig> for Vec<NodeInfo> {
+    fn from(value: PermissionedStakeTableConfig) -> Self {
+        value
+            .public_keys
+            .into_iter()
+            .map(|peer_config| {
+                let node_info: NodeInfoJf = peer_config.clone().into();
+                node_info.into()
+            })
+            .collect()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use crate::stake_table::PermissionedStakeTableConfig;
+    use crate::test_utils::setup_test;
+    use hotshot::types::{BLSPubKey, SignatureKey};
+    use hotshot_types::{light_client::StateKeyPair, network::PeerConfigKeys};
+    use toml::toml;
+
+    #[test]
+    fn test_permissioned_stake_table_from_toml() {
+        setup_test();
+
+        let mut keys = Vec::new();
+        for i in 0..3 {
+            let (pubkey, _) = BLSPubKey::generated_from_seed_indexed([0; 32], i);
+            let state_kp = StateKeyPair::generate_from_seed_indexed([0; 32], i).0;
+            let ver_key = state_kp.ver_key();
+            keys.push(PeerConfigKeys {
+                stake_table_key: pubkey,
+                state_ver_key: ver_key,
+                stake: 1,
+                da: i == 0,
+            });
+        }
+
+        let st_key_1 = keys[0].stake_table_key.to_string();
+        let verkey_1 = keys[0].state_ver_key.to_string();
+        let da_1 = keys[0].da;
+
+        let st_key_2 = keys[1].stake_table_key.to_string();
+        let verkey_2 = keys[1].state_ver_key.to_string();
+        let da_2 = keys[1].da;
+
+        let st_key_3 = keys[2].stake_table_key.to_string();
+        let verkey_3 = keys[2].state_ver_key.to_string();
+        let da_3 = keys[2].da;
+
+        let toml = toml! {
+            [[public_keys]]
+            stake_table_key = st_key_1
+            state_ver_key = verkey_1
+            stake = 1
+            da = da_1
+
+            [[public_keys]]
+            stake_table_key = st_key_2
+            state_ver_key = verkey_2
+            stake = 1
+            da = da_2
+
+            [[public_keys]]
+            stake_table_key = st_key_3
+            state_ver_key = verkey_3
+            stake = 2
+            da = da_3
+
+        }
+        .to_string();
+
+        let toml_st: PermissionedStakeTableConfig = toml::from_str(&toml).unwrap();
+
+        assert_eq!(toml_st.public_keys.len(), 3);
+
+        // TODO: add `PartialEq` to PeerConfigKeys
+        assert_eq!(toml_st.public_keys[0].state_ver_key, keys[0].state_ver_key);
+        assert_eq!(
+            toml_st.public_keys[0].stake_table_key,
+            keys[0].stake_table_key
+        );
+        assert_eq!(toml_st.public_keys[0].da, da_1);
+        assert_eq!(toml_st.public_keys[0].stake, 1);
+
+        assert_eq!(toml_st.public_keys[1].state_ver_key, keys[1].state_ver_key);
+        assert_eq!(
+            toml_st.public_keys[1].stake_table_key,
+            keys[1].stake_table_key
+        );
+        assert_eq!(toml_st.public_keys[1].da, da_2);
+        assert_eq!(toml_st.public_keys[1].stake, 1);
+
+        assert_eq!(toml_st.public_keys[2].state_ver_key, keys[2].state_ver_key);
+        assert_eq!(
+            toml_st.public_keys[2].stake_table_key,
+            keys[2].stake_table_key
+        );
+        assert_eq!(toml_st.public_keys[2].da, da_3);
+        assert_eq!(toml_st.public_keys[2].stake, 2);
+    }
+}
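
Editor's note: for reference, a minimal sketch (not part of the patch) of how the new stake-table config could be loaded and converted into the `Vec<NodeInfo>` that the deployer's `initial_stake_table` argument expects. The crate path `sequencer_utils` and the file name `data/initial_stake_table.toml` are illustrative assumptions, not taken from this diff.

    // Illustrative only: load a permissioned stake table config from TOML and
    // convert it into contract-bindings `NodeInfo` entries for deployment.
    use std::path::Path;

    use contract_bindings::permissioned_stake_table::NodeInfo;
    use sequencer_utils::stake_table::PermissionedStakeTableConfig; // crate name assumed

    fn load_initial_stake_table(path: &Path) -> anyhow::Result<Vec<NodeInfo>> {
        let config = PermissionedStakeTableConfig::from_toml_file(path)?;
        // Uses the `From<PermissionedStakeTableConfig> for Vec<NodeInfo>` impl above,
        // which converts each PeerConfigKeys into NodeInfoJf and then NodeInfo.
        Ok(config.into())
    }

    // e.g. load_initial_stake_table(Path::new("data/initial_stake_table.toml")),
    // with the result passed as `Some(entries)` for `deploy`'s `initial_stake_table`.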
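Similarly, a small sketch (also not part of the patch) of the new closure shape for `BackoffParams::retry`: the closure now receives the attempt index, which callers can forward to the retry-aware `StateCatchup` methods or use for logging. Import paths and trait bounds are abbreviated assumptions.

    // Illustrative only: the retry closure now takes (&mut state, attempt_index).
    use committable::Commitment;
    use espresso_types::{traits::StateCatchup, BackoffParams, ChainConfig}; // paths assumed
    use futures::FutureExt;

    async fn fetch_chain_config_with_backoff<P: StateCatchup>(
        backoff: &BackoffParams,
        provider: P,
        commitment: Commitment<ChainConfig>,
    ) -> anyhow::Result<ChainConfig> {
        backoff
            .retry(provider, move |provider, attempt| {
                async move {
                    // The attempt counter is now available for logging/metrics.
                    tracing::debug!(attempt, "fetching chain config");
                    provider.try_fetch_chain_config(attempt, commitment).await
                }
                .boxed()
            })
            .await
    }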