From f3a1b5da3190b1a3d838c9955f2da6c6830bd9e3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 27 Jun 2022 22:50:27 +0000 Subject: [PATCH 01/15] Update Sepolia TTD (#3288) ## Issue Addressed NA ## Proposed Changes Update Sepolia TTD as per https://github.com/eth-clients/merge-testnets/pull/21 ## Additional Info NA --- .../built_in_network_configs/sepolia/config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 95587c29087..4c3e4bb6eca 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -24,7 +24,7 @@ ALTAIR_FORK_EPOCH: 50 # Merge BELLATRIX_FORK_VERSION: 0x90000071 BELLATRIX_FORK_EPOCH: 100 -TERMINAL_TOTAL_DIFFICULTY: 100000000000000000000000 +TERMINAL_TOTAL_DIFFICULTY: 17000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 From 45b2eb18bc4ca6a8e06fc66d3139358b38418ed2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 28 Jun 2022 03:03:30 +0000 Subject: [PATCH 02/15] v2.3.2-rc.0 (#3289) ## Issue Addressed NA ## Proposed Changes Bump versions ## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3455ca8efa5..1554ff5564b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -328,7 +328,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "beacon_chain", "clap", @@ -485,7 +485,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "beacon_node", "clap", @@ -2885,7 +2885,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "account_utils", "bls", @@ -3383,7 +3383,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.3.1" +version = "2.3.2-rc.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 081e91aba8a..ccb145caf97 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.3.1" +version = "2.3.2-rc.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c5a5bc57e8e..e4a6bd01792 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.3.1-", - fallback = "Lighthouse/v2.3.1" + prefix = "Lighthouse/v2.3.2-rc.0-", + fallback = "Lighthouse/v2.3.2-rc.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 5dfcba8fa15..037171097d8 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.3.1" +version = "2.3.2-rc.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 35fee803157..d9bd4334cfc 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.3.1" +version = "2.3.2-rc.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From 36453929d55568a714fd98d60087cad0f042f711 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 29 Jun 2022 04:50:36 +0000 Subject: [PATCH 03/15] Update Cross config for v0.2.2 (#3286) ## Proposed Changes Update `Cross.toml` for the recently released Cross v0.2.2. This allows us to remove the dependency on my fork of the Cross Docker image, which was a maintenance burden and prone to bit-rot. This PR puts us back in sync with upstream Cross. ## Additional Info Due to some bindgen errors on the default Cross images we seemingly need a full `clang-3.9` install. The `libclang-3.9-dev` package was found to be insufficient due to `stdarg.h` being missing. In order to continue building locally all Lighthouse devs should update their local cross version with `cargo install cross`. --- Cross.toml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/Cross.toml b/Cross.toml index 2db39924648..d5f7a5d5068 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,15 +1,5 @@ -[build.env] -passthrough = [ - "RUSTFLAGS", -] - -# These custom images are required to work around the lack of Clang in the default `cross` images. -# We need Clang to run `bindgen` for MDBX, and the `BINDGEN_EXTRA_CLANG_ARGS` flags must also be set -# while cross-compiling for ARM to prevent bindgen from attempting to include headers from the host. -# -# For more information see https://github.com/rust-embedded/cross/pull/608 [target.x86_64-unknown-linux-gnu] -image = "michaelsproul/cross-clang:x86_64-latest" +pre-build = ["apt-get install -y cmake clang-3.9"] [target.aarch64-unknown-linux-gnu] -image = "michaelsproul/cross-clang:aarch64-latest" +pre-build = ["apt-get install -y cmake clang-3.9"] From 53b2b500dbb9f00b5d6dd4da065e52d7ba186f9a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 29 Jun 2022 04:50:37 +0000 Subject: [PATCH 04/15] Extend block reward APIs (#3290) ## Proposed Changes Add a new HTTP endpoint `POST /lighthouse/analysis/block_rewards` which takes a vec of `BeaconBlock`s as input and outputs the `BlockReward`s for them. Augment the `BlockReward` struct with the attestation data for attestations in the block, which simplifies access to this information from blockprint. Using attestation data I've been able to make blockprint up to 95% accurate across Prysm/Lighthouse/Teku/Nimbus. I hope to go even higher using a bunch of synthetic blocks produced for Prysm/Nimbus/Lodestar, which are underrepresented in the current training data. 
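For example, something like the following should exercise the new endpoint: it POSTs a JSON array of blocks and decodes one `BlockReward` per block from the response. This is a hedged sketch, not part of the diff — it assumes the default HTTP API address `127.0.0.1:5052`, a `reqwest` dependency with its `json` feature enabled, and `fetch_rewards`/`blocks` are illustrative names.

```rust
use eth2::lighthouse::BlockReward;
use types::{BeaconBlock, MainnetEthSpec};

/// POST a batch of blocks to the analysis endpoint and decode the rewards.
async fn fetch_rewards(
    blocks: &[BeaconBlock<MainnetEthSpec>],
) -> Result<Vec<BlockReward>, reqwest::Error> {
    reqwest::Client::new()
        .post("http://127.0.0.1:5052/lighthouse/analysis/block_rewards")
        // Serializes the slice to a JSON array, matching the server's
        // `warp::body::json()` filter.
        .json(blocks)
        .send()
        .await?
        .json()
        .await
}
```

An equivalent `curl -X POST` with a JSON array of blocks as the body should behave the same way.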
--- Cargo.lock | 1 + beacon_node/beacon_chain/src/block_reward.rs | 19 +++- .../beacon_chain/src/block_verification.rs | 2 +- beacon_node/http_api/Cargo.toml | 2 +- beacon_node/http_api/src/block_rewards.rs | 100 +++++++++++++++++- beacon_node/http_api/src/lib.rs | 15 ++- common/eth2/src/lighthouse/block_rewards.rs | 8 +- 7 files changed, 136 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1554ff5564b..d461027e773 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2521,6 +2521,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", + "lru", "network", "parking_lot 0.12.1", "safe_arith", diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 74a27d5f751..4b8b809d3fa 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -2,7 +2,7 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; use operation_pool::{AttMaxCover, MaxCover}; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; -use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256, RelativeEpoch}; +use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; impl BeaconChain { pub fn compute_block_reward>( @@ -10,13 +10,13 @@ impl BeaconChain { block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, state: &BeaconState, + include_attestations: bool, ) -> Result { if block.slot() != state.slot() { return Err(BeaconChainError::BlockRewardSlotError); } - let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?; - let total_active_balance = state.get_total_balance(active_indices, &self.spec)?; + let total_active_balance = state.get_total_active_balance()?; let mut per_attestation_rewards = block .body() .attestations() @@ -60,11 +60,24 @@ impl BeaconChain { .map(|cover| cover.fresh_validators_rewards) .collect(); + // Add the attestation data if desired. + let attestations = if include_attestations { + block + .body() + .attestations() + .iter() + .map(|a| a.data.clone()) + .collect() + } else { + vec![] + }; + let attestation_rewards = AttestationRewards { total: attestation_total, prev_epoch_total, curr_epoch_total, per_attestation_rewards, + attestations, }; // Sync committee rewards. 
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index afdbaf13ee2..c791a35f689 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1235,7 +1235,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { if let Some(ref event_handler) = chain.event_handler { if event_handler.has_block_reward_subscribers() { let block_reward = - chain.compute_block_reward(block.message(), block_root, &state)?; + chain.compute_block_reward(block.message(), block_root, &state, true)?; event_handler.register(EventKind::BlockReward(block_reward)); } } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index a34618c2ef4..9dd2af7d179 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -31,7 +31,7 @@ execution_layer = {path = "../execution_layer"} parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } - +lru = "0.7.7" [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 154773aa95c..05550372101 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -1,10 +1,17 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; -use slog::{warn, Logger}; +use lru::LruCache; +use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; -use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; +use types::BeaconBlock; +use warp_utils::reject::{ + beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, +}; +const STATE_CACHE_SIZE: usize = 2; + +/// Fetch block rewards for blocks from the canonical chain. pub fn get_block_rewards( query: BlockRewardsQuery, chain: Arc>, @@ -50,8 +57,12 @@ pub fn get_block_rewards( let block_replayer = BlockReplayer::new(state, &chain.spec) .pre_block_hook(Box::new(|state, block| { // Compute block reward. - let block_reward = - chain.compute_block_reward(block.message(), block.canonical_root(), state)?; + let block_reward = chain.compute_block_reward( + block.message(), + block.canonical_root(), + state, + query.include_attestations, + )?; block_rewards.push(block_reward); Ok(()) })) @@ -78,3 +89,84 @@ pub fn get_block_rewards( Ok(block_rewards) } + +/// Compute block rewards for blocks passed in as input. +pub fn compute_block_rewards( + blocks: Vec>, + chain: Arc>, + log: Logger, +) -> Result, warp::Rejection> { + let mut block_rewards = Vec::with_capacity(blocks.len()); + let mut state_cache = LruCache::new(STATE_CACHE_SIZE); + + for block in blocks { + let parent_root = block.parent_root(); + + // Check LRU cache for a constructed state from a previous iteration. + let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) { + debug!( + log, + "Re-using cached state for block rewards"; + "parent_root" => ?parent_root, + "slot" => block.slot(), + ); + state + } else { + debug!( + log, + "Fetching state for block rewards"; + "parent_root" => ?parent_root, + "slot" => block.slot() + ); + let parent_block = chain + .get_blinded_block(&parent_root) + .map_err(beacon_chain_error)? 
+ .ok_or_else(|| { + custom_bad_request(format!( + "parent block not known or not canonical: {:?}", + parent_root + )) + })?; + + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .map_err(beacon_chain_error)? + .ok_or_else(|| { + custom_bad_request(format!( + "no state known for parent block: {:?}", + parent_root + )) + })?; + + let block_replayer = BlockReplayer::new(parent_state, &chain.spec) + .no_signature_verification() + .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .map_err(beacon_chain_error)?; + + if block_replayer.state_root_miss() { + warn!( + log, + "Block reward state root miss"; + "parent_slot" => parent_block.slot(), + "slot" => block.slot(), + ); + } + + state_cache + .get_or_insert((parent_root, block.slot()), || block_replayer.into_state()) + .ok_or_else(|| { + custom_server_error("LRU cache insert should always succeed".into()) + })? + }; + + // Compute block reward. + let block_reward = chain + .compute_block_reward(block.to_ref(), block.canonical_root(), state, true) + .map_err(beacon_chain_error)?; + block_rewards.push(block_reward); + } + + Ok(block_rewards) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index be08b3f7374..379033a1130 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2823,6 +2823,18 @@ pub fn serve( blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) }); + // POST lighthouse/analysis/block_rewards + let post_lighthouse_block_rewards = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("block_rewards")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and_then(|blocks, chain, log| { + blocking_json_task(move || block_rewards::compute_block_rewards(blocks, chain, log)) + }); + // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") .and(warp::path("analysis")) @@ -2998,7 +3010,8 @@ pub fn serve( .or(post_validator_prepare_beacon_proposer.boxed()) .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) - .or(post_lighthouse_database_historical_blocks.boxed()), + .or(post_lighthouse_database_historical_blocks.boxed()) + .or(post_lighthouse_block_rewards.boxed()), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs index 186cbd888cf..38070f3539a 100644 --- a/common/eth2/src/lighthouse/block_rewards.rs +++ b/common/eth2/src/lighthouse/block_rewards.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use types::{Hash256, Slot}; +use types::{AttestationData, Hash256, Slot}; /// Details about the rewards paid to a block proposer for proposing a block. /// @@ -42,6 +42,9 @@ pub struct AttestationRewards { /// /// Each element of the vec is a map from validator index to reward. pub per_attestation_rewards: Vec>, + /// The attestations themselves (optional). + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub attestations: Vec, } /// Query parameters for the `/lighthouse/block_rewards` endpoint. @@ -51,4 +54,7 @@ pub struct BlockRewardsQuery { pub start_slot: Slot, /// Upper slot limit for block rewards returned (inclusive). 
pub end_slot: Slot, + /// Include the full attestations themselves? + #[serde(default)] + pub include_attestations: bool, } From 5de00b7ee821dc28f80285acbce083edda4f14a1 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 29 Jun 2022 09:07:09 +0000 Subject: [PATCH 05/15] Unify execution layer endpoints (#3214) ## Issue Addressed Resolves #3069 ## Proposed Changes Unify the `eth1-endpoints` and `execution-endpoints` flags in a backwards compatible way as described in https://github.com/sigp/lighthouse/issues/3069#issuecomment-1134219221 Users have 2 options: 1. Use multiple non auth execution endpoints for deposit processing pre-merge 2. Use a single jwt authenticated execution endpoint for both execution layer and deposit processing post merge Related https://github.com/sigp/lighthouse/issues/3118 To enable jwt authenticated deposit processing, this PR removes the calls to `net_version` as the `net` namespace is not exposed in the auth server in execution clients. Moving away from using `networkId` is a good step in my opinion as it doesn't provide us with any added guarantees over `chainId`. See https://github.com/ethereum/consensus-specs/issues/2163 and https://github.com/sigp/lighthouse/issues/2115 Co-authored-by: Paul Hauner --- Cargo.lock | 11 +- beacon_node/client/Cargo.toml | 2 +- beacon_node/client/src/config.rs | 5 +- beacon_node/eth1/Cargo.toml | 3 +- beacon_node/eth1/src/deposit_cache.rs | 31 +- beacon_node/eth1/src/deposit_log.rs | 107 ---- beacon_node/eth1/src/http.rs | 489 ------------------ beacon_node/eth1/src/lib.rs | 8 +- beacon_node/eth1/src/service.rs | 186 ++++--- beacon_node/eth1/tests/test.rs | 248 ++++----- beacon_node/execution_layer/Cargo.toml | 4 +- beacon_node/execution_layer/src/engine_api.rs | 2 +- .../execution_layer/src/engine_api/auth.rs | 33 ++ .../execution_layer/src/engine_api/http.rs | 484 ++++++++++++++++- beacon_node/execution_layer/src/lib.rs | 15 +- .../genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/genesis/src/lib.rs | 1 + beacon_node/genesis/tests/tests.rs | 9 +- beacon_node/src/cli.rs | 60 +-- beacon_node/src/config.rs | 143 +++-- lcli/src/eth1_genesis.rs | 5 +- lighthouse/Cargo.toml | 1 + lighthouse/tests/beacon_node.rs | 191 +++++-- scripts/local_testnet/ganache_test_node.sh | 3 +- scripts/local_testnet/setup.sh | 2 +- scripts/local_testnet/vars.env | 2 +- scripts/tests/vars.env | 2 +- testing/eth1_test_rig/src/ganache.rs | 22 +- testing/eth1_test_rig/src/lib.rs | 4 +- testing/simulator/Cargo.toml | 1 + testing/simulator/src/eth1_sim.rs | 23 +- 31 files changed, 1110 insertions(+), 989 deletions(-) delete mode 100644 beacon_node/eth1/src/deposit_log.rs delete mode 100644 beacon_node/eth1/src/http.rs diff --git a/Cargo.lock b/Cargo.lock index d461027e773..3dbe005658f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -744,6 +744,7 @@ dependencies = [ "sensitive_url", "serde", "serde_derive", + "serde_yaml", "slasher", "slasher_service", "slog", @@ -753,7 +754,6 @@ dependencies = [ "time 0.3.9", "timer", "tokio", - "toml", "types", ] @@ -1530,6 +1530,7 @@ dependencies = [ "eth2", "eth2_ssz", "eth2_ssz_derive", + "execution_layer", "fallback", "futures", "hex", @@ -1541,12 +1542,12 @@ dependencies = [ "sensitive_url", "serde", "serde_json", + "serde_yaml", "slog", "sloggers", "state_processing", "task_executor", "tokio", - "toml", "tree_hash", "types", "web3", @@ -1877,8 +1878,9 @@ dependencies = [ "async-trait", "bytes", "environment", - "eth1", + "eth2", "eth2_serde_utils", + "eth2_ssz", "eth2_ssz_types", "ethers-core", "exit-future", @@ 
-1896,6 +1898,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", + "state_processing", "task_executor", "tempfile", "tokio", @@ -3397,6 +3400,7 @@ dependencies = [ "directory", "env_logger 0.9.0", "environment", + "eth1", "eth2_hashing", "eth2_network_config", "futures", @@ -5636,6 +5640,7 @@ dependencies = [ "env_logger 0.9.0", "eth1", "eth1_test_rig", + "execution_layer", "futures", "node_test_rig", "parking_lot 0.12.1", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3079d7744e9..d01f2505cce 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dev-dependencies] -toml = "0.5.6" +serde_yaml = "0.8.13" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 13614af12ee..b13ca8f489b 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -198,7 +198,8 @@ mod tests { #[test] fn serde() { let config = Config::default(); - let serialized = toml::to_string(&config).expect("should serde encode default config"); - toml::from_str::(&serialized).expect("should serde decode default config"); + let serialized = + serde_yaml::to_string(&config).expect("should serde encode default config"); + serde_yaml::from_str::(&serialized).expect("should serde decode default config"); } } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index ecf3c19e30e..403869cc9c3 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -6,13 +6,14 @@ edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } -toml = "0.5.6" +serde_yaml = "0.8.13" web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } [dependencies] reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } +execution_layer = { path = "../execution_layer" } futures = "0.3.7" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 7c67893fb34..078e3602f52 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,4 +1,4 @@ -use crate::DepositLog; +use execution_layer::http::deposit_log::DepositLog; use ssz_derive::{Decode, Encode}; use state_processing::common::DepositDataTree; use std::cmp::Ordering; @@ -297,12 +297,37 @@ impl DepositCache { #[cfg(test)] pub mod tests { use super::*; - use crate::deposit_log::tests::EXAMPLE_LOG; - use crate::http::Log; + use execution_layer::http::deposit_log::Log; use types::{EthSpec, MainnetEthSpec}; pub const TREE_DEPTH: usize = 32; + /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
+ pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, + 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, + 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, + 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, + 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, + 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, + 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, + 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + fn example_log() -> DepositLog { let spec = MainnetEthSpec::default_spec(); diff --git a/beacon_node/eth1/src/deposit_log.rs b/beacon_node/eth1/src/deposit_log.rs deleted file mode 100644 index 1b3cfa01a02..00000000000 --- a/beacon_node/eth1/src/deposit_log.rs +++ /dev/null @@ -1,107 +0,0 @@ -use super::http::Log; -use ssz::Decode; -use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; -use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; - -pub use eth2::lighthouse::DepositLog; - -/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The -/// event bytes are formatted according to the Ethereum ABI. -const PUBKEY_START: usize = 192; -const PUBKEY_LEN: usize = 48; -const CREDS_START: usize = PUBKEY_START + 64 + 32; -const CREDS_LEN: usize = 32; -const AMOUNT_START: usize = CREDS_START + 32 + 32; -const AMOUNT_LEN: usize = 8; -const SIG_START: usize = AMOUNT_START + 32 + 32; -const SIG_LEN: usize = 96; -const INDEX_START: usize = SIG_START + 96 + 32; -const INDEX_LEN: usize = 8; - -impl Log { - /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`. 
- pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result { - let bytes = &self.data; - - let pubkey = bytes - .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) - .ok_or("Insufficient bytes for pubkey")?; - let withdrawal_credentials = bytes - .get(CREDS_START..CREDS_START + CREDS_LEN) - .ok_or("Insufficient bytes for withdrawal credential")?; - let amount = bytes - .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) - .ok_or("Insufficient bytes for amount")?; - let signature = bytes - .get(SIG_START..SIG_START + SIG_LEN) - .ok_or("Insufficient bytes for signature")?; - let index = bytes - .get(INDEX_START..INDEX_START + INDEX_LEN) - .ok_or("Insufficient bytes for index")?; - - let deposit_data = DepositData { - pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) - .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?, - withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials) - .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?, - amount: u64::from_ssz_bytes(amount) - .map_err(|e| format!("Invalid amount ssz: {:?}", e))?, - signature: SignatureBytes::from_ssz_bytes(signature) - .map_err(|e| format!("Invalid signature ssz: {:?}", e))?, - }; - - let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) - .map_or(false, |(public_key, signature, msg)| { - signature.verify(&public_key, msg) - }); - - Ok(DepositLog { - deposit_data, - block_number: self.block_number, - index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?, - signature_is_valid, - }) - } -} - -#[cfg(test)] -pub mod tests { - use crate::http::Log; - use types::{EthSpec, MainnetEthSpec}; - - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. - pub const EXAMPLE_LOG: &[u8] = &[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, - 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, - 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, - 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, - 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, - 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, - 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, - 18, 113, 232, 105, 100, 
114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]; - - #[test] - fn can_parse_example_log() { - let log = Log { - block_number: 42, - data: EXAMPLE_LOG.to_vec(), - }; - log.to_deposit_log(&MainnetEthSpec::default_spec()) - .expect("should decode log"); - } -} diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs deleted file mode 100644 index 71b1b5b4b2b..00000000000 --- a/beacon_node/eth1/src/http.rs +++ /dev/null @@ -1,489 +0,0 @@ -//! Provides a very minimal set of functions for interfacing with the eth2 deposit contract via an -//! eth1 HTTP JSON-RPC endpoint. -//! -//! All remote functions return a future (i.e., are async). -//! -//! Does not use a web3 library, instead it uses `reqwest` (`hyper`) to call the remote endpoint -//! and `serde` to decode the response. -//! -//! ## Note -//! -//! There is no ABI parsing here, all function signatures and topics are hard-coded as constants. - -use futures::future::TryFutureExt; -use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode}; -use sensitive_url::SensitiveUrl; -use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; -use std::fmt; -use std::ops::Range; -use std::str::FromStr; -use std::time::Duration; -use types::Hash256; - -/// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` -pub const DEPOSIT_EVENT_TOPIC: &str = - "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; -/// `keccak("get_deposit_root()")[0..4]` -pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; -/// `keccak("get_deposit_count()")[0..4]` -pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; - -/// Number of bytes in deposit contract deposit root response. -pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; -/// Number of bytes in deposit contract deposit root (value only). -pub const DEPOSIT_ROOT_BYTES: usize = 32; - -/// This error is returned during a `chainId` call by Geth. -pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; - -/// Represents an eth1 chain/network id. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub enum Eth1Id { - Goerli, - Mainnet, - Custom(u64), -} - -/// Used to identify a block when querying the Eth1 node. -#[derive(Clone, Copy)] -pub enum BlockQuery { - Number(u64), - Latest, -} - -/// Represents an error received from a remote procecdure call. 
-#[derive(Debug, Serialize, Deserialize)] -pub enum RpcError { - NoResultField, - Eip155Error, - InvalidJson(String), - Error(String), -} - -impl fmt::Display for RpcError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - RpcError::NoResultField => write!(f, "No result field in response"), - RpcError::Eip155Error => write!(f, "Not synced past EIP-155"), - RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e), - RpcError::Error(s) => write!(f, "{}", s), - } - } -} - -impl From for String { - fn from(e: RpcError) -> String { - e.to_string() - } -} - -impl Into for Eth1Id { - fn into(self) -> u64 { - match self { - Eth1Id::Mainnet => 1, - Eth1Id::Goerli => 5, - Eth1Id::Custom(id) => id, - } - } -} - -impl From for Eth1Id { - fn from(id: u64) -> Self { - let into = |x: Eth1Id| -> u64 { x.into() }; - match id { - id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, - id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, - id => Eth1Id::Custom(id), - } - } -} - -impl FromStr for Eth1Id { - type Err = String; - - fn from_str(s: &str) -> Result { - s.parse::() - .map(Into::into) - .map_err(|e| format!("Failed to parse eth1 network id {}", e)) - } -} - -/// Get the eth1 network id of the given endpoint. -pub async fn get_network_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?; - Eth1Id::from_str( - response_result_or_error(&response_body)? - .as_str() - .ok_or("Data was not string")?, - ) -} - -/// Get the eth1 chain id of the given endpoint. -pub async fn get_chain_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body: String = - send_rpc_request(endpoint, "eth_chainId", json!([]), timeout).await?; - - match response_result_or_error(&response_body) { - Ok(chain_id) => { - hex_to_u64_be(chain_id.as_str().ok_or("Data was not string")?).map(|id| id.into()) - } - // Geth returns this error when it's syncing lower blocks. Simply map this into `0` since - // Lighthouse does not raise errors for `0`, it simply waits for it to change. - Err(RpcError::Eip155Error) => Ok(Eth1Id::Custom(0)), - Err(e) => Err(e.to_string()), - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct Block { - pub hash: Hash256, - pub timestamp: u64, - pub number: u64, -} - -/// Returns the current block number. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_block_number(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?; - hex_to_u64_be( - response_result_or_error(&response_body) - .map_err(|e| format!("eth_blockNumber failed: {}", e))? - .as_str() - .ok_or("Data was not string")?, - ) - .map_err(|e| format!("Failed to get block number: {}", e)) -} - -/// Gets a block hash by block number. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_block( - endpoint: &SensitiveUrl, - query: BlockQuery, - timeout: Duration, -) -> Result { - let query_param = match query { - BlockQuery::Number(block_number) => format!("0x{:x}", block_number), - BlockQuery::Latest => "latest".to_string(), - }; - let params = json!([ - query_param, - false // do not return full tx objects. 
- ]); - - let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?; - let response = response_result_or_error(&response_body) - .map_err(|e| format!("eth_getBlockByNumber failed: {}", e))?; - - let hash: Vec = hex_to_bytes( - response - .get("hash") - .ok_or("No hash for block")? - .as_str() - .ok_or("Block hash was not string")?, - )?; - let hash: Hash256 = if hash.len() == 32 { - Hash256::from_slice(&hash) - } else { - return Err(format!("Block has was not 32 bytes: {:?}", hash)); - }; - - let timestamp = hex_to_u64_be( - response - .get("timestamp") - .ok_or("No timestamp for block")? - .as_str() - .ok_or("Block timestamp was not string")?, - )?; - - let number = hex_to_u64_be( - response - .get("number") - .ok_or("No number for block")? - .as_str() - .ok_or("Block number was not string")?, - )?; - - if number <= usize::max_value() as u64 { - Ok(Block { - hash, - timestamp, - number, - }) - } else { - Err(format!("Block number {} is larger than a usize", number)) - } - .map_err(|e| format!("Failed to get block number: {}", e)) -} - -/// Returns the value of the `get_deposit_count()` call at the given `address` for the given -/// `block_number`. -/// -/// Assumes that the `address` has the same ABI as the eth2 deposit contract. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_count( - endpoint: &SensitiveUrl, - address: &str, - block_number: u64, - timeout: Duration, -) -> Result, String> { - let result = call( - endpoint, - address, - DEPOSIT_COUNT_FN_SIGNATURE, - block_number, - timeout, - ) - .await?; - match result { - None => Err("Deposit root response was none".to_string()), - Some(bytes) => { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { - let mut array = [0; 8]; - array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); - Ok(Some(u64::from_le_bytes(array))) - } else { - Err(format!( - "Deposit count response was not {} bytes: {:?}", - DEPOSIT_COUNT_RESPONSE_BYTES, bytes - )) - } - } - } -} - -/// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. -/// -/// Assumes that the `address` has the same ABI as the eth2 deposit contract. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_root( - endpoint: &SensitiveUrl, - address: &str, - block_number: u64, - timeout: Duration, -) -> Result, String> { - let result = call( - endpoint, - address, - DEPOSIT_ROOT_FN_SIGNATURE, - block_number, - timeout, - ) - .await?; - match result { - None => Err("Deposit root response was none".to_string()), - Some(bytes) => { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_ROOT_BYTES { - Ok(Some(Hash256::from_slice(&bytes))) - } else { - Err(format!( - "Deposit root response was not {} bytes: {:?}", - DEPOSIT_ROOT_BYTES, bytes - )) - } - } - } -} - -/// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed -/// `hex_data`. -/// -/// Returns bytes, if any. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -async fn call( - endpoint: &SensitiveUrl, - address: &str, - hex_data: &str, - block_number: u64, - timeout: Duration, -) -> Result>, String> { - let params = json! 
([ - { - "to": address, - "data": hex_data, - }, - format!("0x{:x}", block_number) - ]); - - let response_body = send_rpc_request(endpoint, "eth_call", params, timeout).await?; - - match response_result_or_error(&response_body) { - Ok(result) => { - let hex = result - .as_str() - .map(|s| s.to_string()) - .ok_or("'result' value was not a string")?; - - Ok(Some(hex_to_bytes(&hex)?)) - } - // It's valid for `eth_call` to return without a result. - Err(RpcError::NoResultField) => Ok(None), - Err(e) => Err(format!("eth_call failed: {}", e)), - } -} - -/// A reduced set of fields from an Eth1 contract log. -#[derive(Debug, PartialEq, Clone)] -pub struct Log { - pub(crate) block_number: u64, - pub(crate) data: Vec, -} - -/// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given -/// `block_height_range`. -/// -/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_logs_in_range( - endpoint: &SensitiveUrl, - address: &str, - block_height_range: Range, - timeout: Duration, -) -> Result, String> { - let params = json! ([{ - "address": address, - "topics": [DEPOSIT_EVENT_TOPIC], - "fromBlock": format!("0x{:x}", block_height_range.start), - "toBlock": format!("0x{:x}", block_height_range.end), - }]); - - let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?; - response_result_or_error(&response_body) - .map_err(|e| format!("eth_getLogs failed: {}", e))? - .as_array() - .cloned() - .ok_or("'result' value was not an array")? - .into_iter() - .map(|value| { - let block_number = value - .get("blockNumber") - .ok_or("No block number field in log")? - .as_str() - .ok_or("Block number was not string")?; - - let data = value - .get("data") - .ok_or("No block number field in log")? - .as_str() - .ok_or("Data was not string")?; - - Ok(Log { - block_number: hex_to_u64_be(block_number)?, - data: hex_to_bytes(data)?, - }) - }) - .collect::, String>>() - .map_err(|e| format!("Failed to get logs in range: {}", e)) -} - -/// Sends an RPC request to `endpoint`, using a POST with the given `body`. -/// -/// Tries to receive the response and parse the body as a `String`. -pub async fn send_rpc_request( - endpoint: &SensitiveUrl, - method: &str, - params: Value, - timeout: Duration, -) -> Result { - let body = json! ({ - "jsonrpc": "2.0", - "method": method, - "params": params, - "id": 1 - }) - .to_string(); - - // Note: it is not ideal to create a new client for each request. - // - // A better solution would be to create some struct that contains a built client and pass it - // around (similar to the `web3` crate's `Transport` structs). - let response = ClientBuilder::new() - .timeout(timeout) - .build() - .expect("The builder should always build a client") - .post(endpoint.full.clone()) - .header(CONTENT_TYPE, "application/json") - .body(body) - .send() - .map_err(|e| format!("Request failed: {:?}", e)) - .await?; - if response.status() != StatusCode::OK { - return Err(format!( - "Response HTTP status was not 200 OK: {}.", - response.status() - )); - }; - let encoding = response - .headers() - .get(CONTENT_TYPE) - .ok_or("No content-type header in response")? 
- .to_str() - .map(|s| s.to_string()) - .map_err(|e| format!("Failed to parse content-type header: {}", e))?; - - response - .bytes() - .map_err(|e| format!("Failed to receive body: {:?}", e)) - .await - .and_then(move |bytes| match encoding.as_str() { - "application/json" => Ok(bytes), - "application/json; charset=utf-8" => Ok(bytes), - other => Err(format!("Unsupported encoding: {}", other)), - }) - .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) - .map_err(|e| format!("Failed to receive body: {:?}", e)) -} - -/// Accepts an entire HTTP body (as a string) and returns either the `result` field or the `error['message']` field, as a serde `Value`. -fn response_result_or_error(response: &str) -> Result { - let json = serde_json::from_str::(response) - .map_err(|e| RpcError::InvalidJson(e.to_string()))?; - - if let Some(error) = json.get("error").and_then(|e| e.get("message")) { - let error = error.to_string(); - if error.contains(EIP155_ERROR_STR) { - Err(RpcError::Eip155Error) - } else { - Err(RpcError::Error(error)) - } - } else { - json.get("result").cloned().ok_or(RpcError::NoResultField) - } -} - -/// Parses a `0x`-prefixed, **big-endian** hex string as a u64. -/// -/// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian. -/// Therefore, this function is only useful for numbers encoded by the JSON RPC. -/// -/// E.g., `0x01 == 1` -fn hex_to_u64_be(hex: &str) -> Result { - u64::from_str_radix(strip_prefix(hex)?, 16) - .map_err(|e| format!("Failed to parse hex as u64: {:?}", e)) -} - -/// Parses a `0x`-prefixed, big-endian hex string as bytes. -/// -/// E.g., `0x0102 == vec![1, 2]` -fn hex_to_bytes(hex: &str) -> Result, String> { - hex::decode(strip_prefix(hex)?).map_err(|e| format!("Failed to parse hex as bytes: {:?}", e)) -} - -/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. 
-fn strip_prefix(hex: &str) -> Result<&str, String> { - if let Some(stripped) = hex.strip_prefix("0x") { - Ok(stripped) - } else { - Err("Hex string did not start with `0x`".to_string()) - } -} diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index cf724201a42..f99d0852501 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -3,17 +3,15 @@ extern crate lazy_static; mod block_cache; mod deposit_cache; -mod deposit_log; -pub mod http; mod inner; mod metrics; mod service; pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; -pub use deposit_log::DepositLog; +pub use execution_layer::http::deposit_log::DepositLog; pub use inner::SszEth1Cache; pub use service::{ - BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_CHAIN_ID, - DEFAULT_NETWORK_ID, + BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, + DEFAULT_CHAIN_ID, }; diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 15e2123e8a5..36a637d2ae6 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -2,12 +2,13 @@ use crate::metrics; use crate::{ block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, deposit_cache::{DepositCacheInsertOutcome, Error as DepositCacheError}, - http::{ - get_block, get_block_number, get_chain_id, get_deposit_logs_in_range, get_network_id, - BlockQuery, Eth1Id, - }, inner::{DepositUpdater, Inner}, }; +use execution_layer::auth::Auth; +use execution_layer::http::{ + deposit_methods::{BlockQuery, Eth1Id}, + HttpJsonRpc, +}; use fallback::{Fallback, FallbackError}; use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; @@ -17,14 +18,13 @@ use slog::{crit, debug, error, info, trace, warn, Logger}; use std::fmt::Debug; use std::future::Future; use std::ops::{Range, RangeInclusive}; +use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::RwLock as TRwLock; use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, EthSpec, Unsigned}; -/// Indicates the default eth1 network id we use for the deposit contract. -pub const DEFAULT_NETWORK_ID: Eth1Id = Eth1Id::Goerli; /// Indicates the default eth1 chain id we use for the deposit contract. pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; /// Indicates the default eth1 endpoint. @@ -63,14 +63,14 @@ pub enum EndpointError { type EndpointState = Result<(), EndpointError>; pub struct EndpointWithState { - endpoint: SensitiveUrl, + client: HttpJsonRpc, state: TRwLock>, } impl EndpointWithState { - pub fn new(endpoint: SensitiveUrl) -> Self { + pub fn new(client: HttpJsonRpc) -> Self { Self { - endpoint, + client, state: TRwLock::new(None), } } @@ -89,7 +89,6 @@ async fn get_state(endpoint: &EndpointWithState) -> Option { /// is not usable. 
pub struct EndpointsCache { pub fallback: Fallback, - pub config_network_id: Eth1Id, pub config_chain_id: Eth1Id, pub log: Logger, } @@ -107,20 +106,14 @@ impl EndpointsCache { } crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_REQUESTS, - &[&endpoint.endpoint.to_string()], + &[&endpoint.client.to_string()], ); - let state = endpoint_state( - &endpoint.endpoint, - &self.config_network_id, - &self.config_chain_id, - &self.log, - ) - .await; + let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await; *value = Some(state.clone()); if state.is_err() { crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_ERRORS, - &[&endpoint.endpoint.to_string()], + &[&endpoint.client.to_string()], ); crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); } else { @@ -136,7 +129,7 @@ impl EndpointsCache { func: F, ) -> Result<(O, usize), FallbackError> where - F: Fn(&'a SensitiveUrl) -> R, + F: Fn(&'a HttpJsonRpc) -> R, R: Future>, { let func = &func; @@ -144,12 +137,12 @@ impl EndpointsCache { .first_success(|endpoint| async move { match self.state(endpoint).await { Ok(()) => { - let endpoint_str = &endpoint.endpoint.to_string(); + let endpoint_str = &endpoint.client.to_string(); crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_REQUESTS, &[endpoint_str], ); - match func(&endpoint.endpoint).await { + match func(&endpoint.client).await { Ok(t) => Ok(t), Err(t) => { crate::metrics::inc_counter_vec( @@ -186,8 +179,7 @@ impl EndpointsCache { /// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and /// chain id. Otherwise it returns `Err`. async fn endpoint_state( - endpoint: &SensitiveUrl, - config_network_id: &Eth1Id, + endpoint: &HttpJsonRpc, config_chain_id: &Eth1Id, log: &Logger, ) -> EndpointState { @@ -200,21 +192,9 @@ async fn endpoint_state( ); EndpointError::RequestFailed(e) }; - let network_id = get_network_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) - .await - .map_err(error_connecting)?; - if &network_id != config_network_id { - warn!( - log, - "Invalid eth1 network id on endpoint. Please switch to correct network id"; - "endpoint" => %endpoint, - "action" => "trying fallbacks", - "expected" => format!("{:?}",config_network_id), - "received" => format!("{:?}",network_id), - ); - return Err(EndpointError::WrongNetworkId); - } - let chain_id = get_chain_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) + + let chain_id = endpoint + .get_chain_id(Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) .await .map_err(error_connecting)?; // Eth1 nodes return chain_id = 0 if the node is not synced @@ -253,7 +233,7 @@ pub enum HeadType { /// Returns the head block and the new block ranges relevant for deposits and the block cache /// from the given endpoint. async fn get_remote_head_and_new_block_ranges( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, service: &Service, node_far_behind_seconds: u64, ) -> Result< @@ -315,14 +295,14 @@ async fn get_remote_head_and_new_block_ranges( /// Returns the range of new block numbers to be considered for the given head type from the given /// endpoint. 
async fn relevant_new_block_numbers_from_endpoint( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, service: &Service, head_type: HeadType, ) -> Result>, SingleEndpointError> { - let remote_highest_block = - get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(SingleEndpointError::GetBlockNumberFailed) - .await?; + let remote_highest_block = endpoint + .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) + .map_err(SingleEndpointError::GetBlockNumberFailed) + .await?; service.relevant_new_block_numbers(remote_highest_block, None, head_type) } @@ -379,14 +359,41 @@ pub struct DepositCacheUpdateOutcome { pub logs_imported: usize, } +/// Supports either one authenticated jwt JSON-RPC endpoint **or** +/// multiple non-authenticated endpoints with fallback. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum Eth1Endpoint { + Auth { + endpoint: SensitiveUrl, + jwt_path: PathBuf, + jwt_id: Option, + jwt_version: Option, + }, + NoAuth(Vec), +} + +impl Eth1Endpoint { + fn len(&self) -> usize { + match &self { + Self::Auth { .. } => 1, + Self::NoAuth(urls) => urls.len(), + } + } + + pub fn get_endpoints(&self) -> Vec { + match &self { + Self::Auth { endpoint, .. } => vec![endpoint.clone()], + Self::NoAuth(endpoints) => endpoints.clone(), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoints: Vec, + pub endpoints: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. pub deposit_contract_address: String, - /// The eth1 network id where the deposit contract is deployed (Goerli/Mainnet). - pub network_id: Eth1Id, /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). pub chain_id: Eth1Id, /// Defines the first block that the `DepositCache` will start searching for deposit logs. @@ -461,10 +468,9 @@ impl Config { impl Default for Config { fn default() -> Self { Self { - endpoints: vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL.")], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) + .expect("The default Eth1 endpoint must always be a valid URL.")]), deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), - network_id: DEFAULT_NETWORK_ID, chain_id: DEFAULT_CHAIN_ID, deposit_contract_deploy_block: 1, lowest_cached_block_number: 1, @@ -673,27 +679,45 @@ impl Service { } /// Builds a new `EndpointsCache` with empty states. - pub fn init_endpoints(&self) -> Arc { + pub fn init_endpoints(&self) -> Result, String> { let endpoints = self.config().endpoints.clone(); - let config_network_id = self.config().network_id.clone(); let config_chain_id = self.config().chain_id.clone(); + + let servers = match endpoints { + Eth1Endpoint::Auth { + jwt_path, + endpoint, + jwt_id, + jwt_version, + } => { + let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) + .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; + vec![HttpJsonRpc::new_with_auth(endpoint, auth) + .map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?] 
+ } + Eth1Endpoint::NoAuth(urls) => urls + .into_iter() + .map(|url| { + HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e)) + }) + .collect::>()?, + }; let new_cache = Arc::new(EndpointsCache { - fallback: Fallback::new(endpoints.into_iter().map(EndpointWithState::new).collect()), - config_network_id, + fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()), config_chain_id, log: self.log.clone(), }); let mut endpoints_cache = self.inner.endpoints_cache.write(); *endpoints_cache = Some(new_cache.clone()); - new_cache + Ok(new_cache) } /// Returns the cached `EndpointsCache` if it exists or builds a new one. - pub fn get_endpoints(&self) -> Arc { + pub fn get_endpoints(&self) -> Result, String> { let endpoints_cache = self.inner.endpoints_cache.read(); if let Some(cache) = endpoints_cache.clone() { - cache + Ok(cache) } else { drop(endpoints_cache); self.init_endpoints() @@ -711,7 +735,7 @@ impl Service { pub async fn update( &self, ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let endpoints = self.get_endpoints(); + let endpoints = self.get_endpoints()?; // Reset the state of any endpoints which have errored so their state can be redetermined. endpoints.reset_errorred_endpoints().await; @@ -738,7 +762,7 @@ impl Service { } } } - endpoints.fallback.map_format_error(|s| &s.endpoint, e) + endpoints.fallback.map_format_error(|s| &s.client, e) }; let process_err = |e: Error| match &e { @@ -988,15 +1012,15 @@ impl Service { */ let block_range_ref = &block_range; let logs = endpoints - .first_success(|e| async move { - get_deposit_logs_in_range( - e, - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(SingleEndpointError::GetDepositLogsFailed) + .first_success(|endpoint| async move { + endpoint + .get_deposit_logs_in_range( + deposit_contract_address_ref, + block_range_ref.clone(), + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), + ) + .await + .map_err(SingleEndpointError::GetDepositLogsFailed) }) .await .map(|(res, _)| res) @@ -1305,7 +1329,7 @@ fn relevant_block_range( /// /// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. async fn download_eth1_block( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, cache: Arc, block_number_opt: Option, ) -> Result { @@ -1326,15 +1350,15 @@ async fn download_eth1_block( }); // Performs a `get_blockByNumber` call to an eth1 node. 
- let http_block = get_block( - endpoint, - block_number_opt - .map(BlockQuery::Number) - .unwrap_or_else(|| BlockQuery::Latest), - Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), - ) - .map_err(SingleEndpointError::BlockDownloadFailed) - .await?; + let http_block = endpoint + .get_block( + block_number_opt + .map(BlockQuery::Number) + .unwrap_or_else(|| BlockQuery::Latest), + Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), + ) + .map_err(SingleEndpointError::BlockDownloadFailed) + .await?; Ok(Eth1Block { hash: http_block.hash, @@ -1359,8 +1383,8 @@ mod tests { #[test] fn serde_serialize() { let serialized = - toml::to_string(&Config::default()).expect("Should serde encode default config"); - toml::from_str::(&serialized).expect("Should serde decode default config"); + serde_yaml::to_string(&Config::default()).expect("Should serde encode default config"); + serde_yaml::from_str::(&serialized).expect("Should serde decode default config"); } #[test] diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index 3fe3b3ca527..f7f3b6e703b 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -1,9 +1,9 @@ #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; -use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, Block, Log}; -use eth1::{Config, Service}; -use eth1::{DepositCache, DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Config, Eth1Endpoint, Service}; +use eth1::{DepositCache, DEFAULT_CHAIN_ID}; use eth1_test_rig::GanacheEth1Instance; +use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; use slog::Logger; @@ -51,39 +51,39 @@ fn random_deposit_data() -> DepositData { } /// Blocking operation to get the deposit logs from the `deposit_contract`. -async fn blocking_deposit_logs(eth1: &GanacheEth1Instance, range: Range) -> Vec { - get_deposit_logs_in_range( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - range, - timeout(), - ) - .await - .expect("should get logs") +async fn blocking_deposit_logs( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + range: Range, +) -> Vec { + client + .get_deposit_logs_in_range(ð1.deposit_contract.address(), range, timeout()) + .await + .expect("should get logs") } /// Blocking operation to get the deposit root from the `deposit_contract`. -async fn blocking_deposit_root(eth1: &GanacheEth1Instance, block_number: u64) -> Option { - get_deposit_root( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - block_number, - timeout(), - ) - .await - .expect("should get deposit root") +async fn blocking_deposit_root( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + client + .get_deposit_root(ð1.deposit_contract.address(), block_number, timeout()) + .await + .expect("should get deposit root") } /// Blocking operation to get the deposit count from the `deposit_contract`. 
-async fn blocking_deposit_count(eth1: &GanacheEth1Instance, block_number: u64) -> Option<u64> {
-    get_deposit_count(
-        &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-        &eth1.deposit_contract.address(),
-        block_number,
-        timeout(),
-    )
-    .await
-    .expect("should get deposit count")
+async fn blocking_deposit_count(
+    client: &HttpJsonRpc,
+    eth1: &GanacheEth1Instance,
+    block_number: u64,
+) -> Option<u64> {
+    client
+        .get_deposit_count(&eth1.deposit_contract.address(), block_number, timeout())
+        .await
+        .expect("should get deposit count")
 }

 async fn get_block_number(web3: &Web3<Http>) -> u64 {
@@ -95,7 +95,7 @@ async fn get_block_number(web3: &Web3<Http>) -> u64 {
 }

 async fn new_ganache_instance() -> Result<GanacheEth1Instance, String> {
-    GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await
+    GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await
 }

 mod eth1_cache {
@@ -117,7 +117,10 @@ mod eth1_cache {
             let initial_block_number = get_block_number(&web3).await;

             let config = Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 lowest_cached_block_number: initial_block_number,
                 follow_distance,
@@ -146,7 +149,7 @@ mod eth1_cache {
                     eth1.ganache.evm_mine().await.expect("should mine block");
                 }

-                let endpoints = service.init_endpoints();
+                let endpoints = service.init_endpoints().unwrap();

                 service
                     .update_deposit_cache(None, &endpoints)
@@ -198,7 +201,10 @@ mod eth1_cache {

         let service = Service::new(
             Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 lowest_cached_block_number: get_block_number(&web3).await,
                 follow_distance: 0,
@@ -215,7 +221,7 @@ mod eth1_cache {
             eth1.ganache.evm_mine().await.expect("should mine block")
         }

-        let endpoints = service.init_endpoints();
+        let endpoints = service.init_endpoints().unwrap();

         service
             .update_deposit_cache(None, &endpoints)
@@ -252,7 +258,10 @@ mod eth1_cache {

         let service = Service::new(
             Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 lowest_cached_block_number: get_block_number(&web3).await,
                 follow_distance: 0,
@@ -267,7 +276,7 @@ mod eth1_cache {
         for _ in 0..cache_len / 2 {
             eth1.ganache.evm_mine().await.expect("should mine block")
         }
-        let endpoints = service.init_endpoints();
+        let endpoints = service.init_endpoints().unwrap();
         service
             .update_deposit_cache(None, &endpoints)
             .await
@@ -302,7 +311,10 @@ mod eth1_cache {

         let service = Service::new(
             Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 lowest_cached_block_number: get_block_number(&web3).await,
                 follow_distance: 0,
@@ -316,7 +328,7 @@ mod eth1_cache {
             eth1.ganache.evm_mine().await.expect("should mine block")
         }

-        let endpoints = service.init_endpoints();
+        let endpoints = service.init_endpoints().unwrap();
         futures::try_join!(
             service.update_deposit_cache(None, &endpoints),
             service.update_deposit_cache(None, &endpoints)
@@ -354,7 +366,10 @@ mod deposit_tree {
         let service = Service::new(
             Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 deposit_contract_deploy_block: start_block,
                 follow_distance: 0,
@@ -374,7 +389,7 @@ mod deposit_tree {
                     .expect("should perform a deposit");
             }

-            let endpoints = service.init_endpoints();
+            let endpoints = service.init_endpoints().unwrap();

             service
                 .update_deposit_cache(None, &endpoints)
@@ -434,7 +449,10 @@ mod deposit_tree {

         let service = Service::new(
             Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 deposit_contract_deploy_block: start_block,
                 lowest_cached_block_number: start_block,
@@ -454,7 +472,7 @@ mod deposit_tree {
                 .expect("should perform a deposit");
         }

-        let endpoints = service.init_endpoints();
+        let endpoints = service.init_endpoints().unwrap();
         futures::try_join!(
             service.update_deposit_cache(None, &endpoints),
             service.update_deposit_cache(None, &endpoints)
@@ -484,6 +502,8 @@ mod deposit_tree {
         let mut deposit_roots = vec![];
         let mut deposit_counts = vec![];

+        let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();
+
         // Perform deposits to the smart contract, recording its state along the way.
         for deposit in &deposits {
             deposit_contract
                 .deposit(deposit.clone())
                 .expect("should perform a deposit");
             let block_number = get_block_number(&web3).await;
             deposit_roots.push(
-                blocking_deposit_root(&eth1, block_number)
+                blocking_deposit_root(&client, &eth1, block_number)
                     .await
                     .expect("should get root if contract exists"),
             );
             deposit_counts.push(
-                blocking_deposit_count(&eth1, block_number)
+                blocking_deposit_count(&client, &eth1, block_number)
                     .await
                     .expect("should get count if contract exists"),
             );
@@ -507,7 +527,7 @@ mod deposit_tree {

         // Pull all the deposit logs from the contract.
         let block_number = get_block_number(&web3).await;
-        let logs: Vec<_> = blocking_deposit_logs(&eth1, 0..block_number)
+        let logs: Vec<_> = blocking_deposit_logs(&client, &eth1, 0..block_number)
             .await
             .iter()
             .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log"))
@@ -570,16 +590,12 @@ mod deposit_tree {

 /// Tests for the base HTTP requests and response handlers.
 mod http {
     use super::*;
-    use eth1::http::BlockQuery;
-
-    async fn get_block(eth1: &GanacheEth1Instance, block_number: u64) -> Block {
-        eth1::http::get_block(
-            &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-            BlockQuery::Number(block_number),
-            timeout(),
-        )
-        .await
-        .expect("should get block number")
+
+    async fn get_block(client: &HttpJsonRpc, block_number: u64) -> Block {
+        client
+            .get_block(BlockQuery::Number(block_number), timeout())
+            .await
+            .expect("should get block number")
     }

     #[tokio::test]
@@ -590,17 +606,18 @@ mod http {
             .expect("should start eth1 environment");
         let deposit_contract = &eth1.deposit_contract;
         let web3 = eth1.web3();
+        let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();

         let block_number = get_block_number(&web3).await;
-        let logs = blocking_deposit_logs(&eth1, 0..block_number).await;
+        let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
         assert_eq!(logs.len(), 0);

-        let mut old_root = blocking_deposit_root(&eth1, block_number).await;
-        let mut old_block = get_block(&eth1, block_number).await;
+        let mut old_root = blocking_deposit_root(&client, &eth1, block_number).await;
+        let mut old_block = get_block(&client, block_number).await;
         let mut old_block_number = block_number;

         assert_eq!(
-            blocking_deposit_count(&eth1, block_number).await,
+            blocking_deposit_count(&client, &eth1, block_number).await,
             Some(0),
             "should have deposit count zero"
         );
@@ -618,18 +635,18 @@ mod http {

             // Check the logs.
             let block_number = get_block_number(&web3).await;
-            let logs = blocking_deposit_logs(&eth1, 0..block_number).await;
+            let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
             assert_eq!(logs.len(), i, "the number of logs should be as expected");

             // Check the deposit count.
             assert_eq!(
-                blocking_deposit_count(&eth1, block_number).await,
+                blocking_deposit_count(&client, &eth1, block_number).await,
                 Some(i as u64),
                 "should have a correct deposit count"
             );

             // Check the deposit root.
-            let new_root = blocking_deposit_root(&eth1, block_number).await;
+            let new_root = blocking_deposit_root(&client, &eth1, block_number).await;
             assert_ne!(
                 new_root, old_root,
                 "deposit root should change with each deposit"
             );
             old_root = new_root;

             // Check the block hash.
-            let new_block = get_block(&eth1, block_number).await;
+            let new_block = get_block(&client, block_number).await;
             assert_ne!(
                 new_block.hash, old_block.hash,
                 "block hash should change with each deposit"
@@ -689,7 +706,10 @@ mod fast {
         let now = get_block_number(&web3).await;
         let service = Service::new(
             Config {
-                endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+                endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                    eth1.endpoint().as_str(),
+                )
+                .unwrap()]),
                 deposit_contract_address: deposit_contract.address(),
                 deposit_contract_deploy_block: now,
                 lowest_cached_block_number: now,
@@ -700,6 +720,7 @@ mod fast {
             log,
             MainnetEthSpec::default_spec(),
         );
+        let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();
         let n = 10;
         let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
         for deposit in &deposits {
@@ -711,7 +732,7 @@ mod fast {
             eth1.ganache.evm_mine().await.expect("should mine block");
         }

-        let endpoints = service.init_endpoints();
+        let endpoints = service.init_endpoints().unwrap();
         service
             .update_deposit_cache(None, &endpoints)
             .await
@@ -723,8 +744,9 @@ mod fast {
         );

         for block_num in 0..=get_block_number(&web3).await {
-            let expected_deposit_count = blocking_deposit_count(&eth1, block_num).await;
-            let expected_deposit_root = blocking_deposit_root(&eth1, block_num).await;
+            let expected_deposit_count =
+                blocking_deposit_count(&client, &eth1, block_num).await;
+            let expected_deposit_root = blocking_deposit_root(&client, &eth1, block_num).await;

             let deposit_count = service
                 .deposits()
@@ -765,7 +787,10 @@ mod persist {
         let now = get_block_number(&web3).await;
         let config = Config {
-            endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()],
+            endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+                eth1.endpoint().as_str(),
+            )
+            .unwrap()]),
             deposit_contract_address: deposit_contract.address(),
             deposit_contract_deploy_block: now,
             lowest_cached_block_number: now,
@@ -783,7 +808,7 @@ mod persist {
                 .expect("should perform a deposit");
         }

-        let endpoints = service.init_endpoints();
+        let endpoints = service.init_endpoints().unwrap();
         service
             .update_deposit_cache(None, &endpoints)
             .await
@@ -874,10 +899,10 @@ mod fallbacks {

         let service = Service::new(
             Config {
-                endpoints: vec![
+                endpoints: Eth1Endpoint::NoAuth(vec![
                     SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
                     SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
-                ],
+                ]),
                 deposit_contract_address: deposit_contract.address(),
                 lowest_cached_block_number: initial_block_number,
                 follow_distance: 0,
@@ -909,82 +934,13 @@ mod fallbacks {
             .await;
     }

-    #[tokio::test]
-    async fn test_fallback_when_wrong_network_id() {
-        async {
-            let log = null_logger();
-            let correct_network_id: u64 = DEFAULT_NETWORK_ID.into();
-            let wrong_network_id = correct_network_id + 1;
-            let endpoint1 = GanacheEth1Instance::new(wrong_network_id, DEFAULT_CHAIN_ID.into())
-                .await
-                .expect("should start eth1 environment");
-            let endpoint2 = new_ganache_instance()
-                .await
-                .expect("should start eth1 environment");
-            let deposit_contract = &endpoint2.deposit_contract;
-
-            let initial_block_number = get_block_number(&endpoint2.web3()).await;
-
-            // Create some blocks and then consume them, performing the test `rounds` times.
- let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - //additional blocks for endpoint1 to be able to distinguish - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: vec![ - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ], - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3()).await; - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint2_block_number < endpoint1_block_number); - //the call will fallback to endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - #[tokio::test] async fn test_fallback_when_wrong_chain_id() { async { let log = null_logger(); let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into(); let wrong_chain_id = correct_chain_id + 1; - let endpoint1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), wrong_chain_id) + let endpoint1 = GanacheEth1Instance::new(wrong_chain_id) .await .expect("should start eth1 environment"); let endpoint2 = new_ganache_instance() @@ -1021,10 +977,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, @@ -1076,10 +1032,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0351b5e433d..dbd63246803 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -16,14 +16,16 @@ reqwest = { version = "0.11.0", features = ["json","stream"] } eth2_serde_utils = "0.1.1" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } -eth1 = { path = "../eth1" } warp = { version = "0.3.2", features = ["tls"] } jsonwebtoken = "8" environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" +eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" +eth2 = { path = "../../common/eth2" } +state_processing = { path = "../../consensus/state_processing" } lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 9eb98cecb97..5f3edb78bfd 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,7 +1,7 @@ 
use crate::engines::ForkChoiceState;
 use async_trait::async_trait;
-use eth1::http::RpcError;
 pub use ethers_core::types::Transaction;
+use http::deposit_methods::RpcError;
 pub use json_structures::TransitionConfigurationV1;
 use reqwest::StatusCode;
 use serde::{Deserialize, Serialize};
diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs
index a4050a25c0e..560e43585b7 100644
--- a/beacon_node/execution_layer/src/engine_api/auth.rs
+++ b/beacon_node/execution_layer/src/engine_api/auth.rs
@@ -1,3 +1,5 @@
+use std::path::PathBuf;
+
 use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header};
 use rand::Rng;
 use serde::{Deserialize, Serialize};
@@ -13,6 +15,7 @@ pub const JWT_SECRET_LENGTH: usize = 32;
 pub enum Error {
     JWT(jsonwebtoken::errors::Error),
     InvalidToken,
+    InvalidKey(String),
 }

 impl From<jsonwebtoken::errors::Error> for Error {
@@ -57,6 +60,14 @@ impl JwtKey {
     }
 }

+pub fn strip_prefix(s: &str) -> &str {
+    if let Some(stripped) = s.strip_prefix("0x") {
+        stripped
+    } else {
+        s
+    }
+}
+
 /// Contains the JWT secret and claims parameters.
 pub struct Auth {
     key: EncodingKey,
@@ -73,6 +84,28 @@ impl Auth {
         }
     }

+    /// Create a new `Auth` struct given the path to the file containing the hex
+    /// encoded jwt key.
+    pub fn new_with_path(
+        jwt_path: PathBuf,
+        id: Option<String>,
+        clv: Option<String>,
+    ) -> Result<Self, Error> {
+        std::fs::read_to_string(&jwt_path)
+            .map_err(|e| {
+                Error::InvalidKey(format!(
+                    "Failed to read JWT secret file {:?}, error: {:?}",
+                    jwt_path, e
+                ))
+            })
+            .and_then(|ref s| {
+                let secret_bytes = hex::decode(strip_prefix(s.trim_end()))
+                    .map_err(|e| Error::InvalidKey(format!("Invalid hex string: {:?}", e)))?;
+                let secret = JwtKey::from_slice(&secret_bytes).map_err(Error::InvalidKey)?;
+                Ok(Self::new(secret, id, clv))
+            })
+    }
+
     /// Generate a JWT token with `claims.iat` set to current time.
     pub fn generate_token(&self) -> Result<String, Error> {
         let claims = self.generate_claims_at_timestamp();
diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs
index 179045ccf86..157f9a3054d 100644
--- a/beacon_node/execution_layer/src/engine_api/http.rs
+++ b/beacon_node/execution_layer/src/engine_api/http.rs
@@ -3,15 +3,16 @@

 use super::*;
 use crate::auth::Auth;
 use crate::json_structures::*;
-use eth1::http::EIP155_ERROR_STR;
 use reqwest::header::CONTENT_TYPE;
 use sensitive_url::SensitiveUrl;
 use serde::de::DeserializeOwned;
 use serde_json::json;
 use std::marker::PhantomData;
+
 use std::time::Duration;
 use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock};

+pub use deposit_log::{DepositLog, Log};
 pub use reqwest::Client;

 const STATIC_ID: u32 = 1;
@@ -48,6 +49,480 @@ pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2);
 pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1";
 pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2);

+/// This error is returned during a `chainId` call by Geth.
+pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block";
+
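The `Auth::new_with_path` helper added above reads a hex-encoded 32-byte secret (with or without a `0x` prefix) and uses it to sign short-lived JWT claims for the engine API. A rough, self-contained sketch of the same flow using `jsonwebtoken` directly (the `Claims` struct here is simplified; the real claims also carry the optional `id`/`clv` fields):

```rust
use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Claims {
    /// Issued-at timestamp; execution clients reject stale tokens.
    iat: u64,
}

fn generate_token(hex_secret: &str) -> Result<String, String> {
    // Strip an optional `0x` prefix, then decode the 32-byte secret.
    let stripped = hex_secret.strip_prefix("0x").unwrap_or(hex_secret);
    let secret = hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e))?;
    if secret.len() != 32 {
        return Err("JWT secret must be 32 bytes".to_string());
    }
    let claims = Claims {
        iat: get_current_timestamp(),
    };
    encode(
        &Header::new(Algorithm::HS256),
        &claims,
        &EncodingKey::from_secret(&secret),
    )
    .map_err(|e| format!("signing failed: {:?}", e))
}
```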
+/// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
+pub mod deposit_log {
+    use ssz::Decode;
+    use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message;
+    use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes};
+
+    pub use eth2::lighthouse::DepositLog;
+
+    /// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The
+    /// event bytes are formatted according to the Ethereum ABI.
+    const PUBKEY_START: usize = 192;
+    const PUBKEY_LEN: usize = 48;
+    const CREDS_START: usize = PUBKEY_START + 64 + 32;
+    const CREDS_LEN: usize = 32;
+    const AMOUNT_START: usize = CREDS_START + 32 + 32;
+    const AMOUNT_LEN: usize = 8;
+    const SIG_START: usize = AMOUNT_START + 32 + 32;
+    const SIG_LEN: usize = 96;
+    const INDEX_START: usize = SIG_START + 96 + 32;
+    const INDEX_LEN: usize = 8;
+
+    /// A reduced set of fields from an Eth1 contract log.
+    #[derive(Debug, PartialEq, Clone)]
+    pub struct Log {
+        pub block_number: u64,
+        pub data: Vec<u8>,
+    }
+
+    impl Log {
+        /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`.
+        pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result<DepositLog, String> {
+            let bytes = &self.data;
+
+            let pubkey = bytes
+                .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN)
+                .ok_or("Insufficient bytes for pubkey")?;
+            let withdrawal_credentials = bytes
+                .get(CREDS_START..CREDS_START + CREDS_LEN)
+                .ok_or("Insufficient bytes for withdrawal credential")?;
+            let amount = bytes
+                .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN)
+                .ok_or("Insufficient bytes for amount")?;
+            let signature = bytes
+                .get(SIG_START..SIG_START + SIG_LEN)
+                .ok_or("Insufficient bytes for signature")?;
+            let index = bytes
+                .get(INDEX_START..INDEX_START + INDEX_LEN)
+                .ok_or("Insufficient bytes for index")?;
+
+            let deposit_data = DepositData {
+                pubkey: PublicKeyBytes::from_ssz_bytes(pubkey)
+                    .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?,
+                withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials)
+                    .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?,
+                amount: u64::from_ssz_bytes(amount)
+                    .map_err(|e| format!("Invalid amount ssz: {:?}", e))?,
+                signature: SignatureBytes::from_ssz_bytes(signature)
+                    .map_err(|e| format!("Invalid signature ssz: {:?}", e))?,
+            };
+
+            let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec)
+                .map_or(false, |(public_key, signature, msg)| {
+                    signature.verify(&public_key, msg)
+                });
+
+            Ok(DepositLog {
+                deposit_data,
+                block_number: self.block_number,
+                index: u64::from_ssz_bytes(index)
+                    .map_err(|e| format!("Invalid index ssz: {:?}", e))?,
+                signature_is_valid,
+            })
+        }
+    }
+
+    #[cfg(test)]
+    pub mod tests {
+        use super::*;
+        use types::{EthSpec, MainnetEthSpec};
+
+        /// The data from a deposit event, using the v0.8.3 version of the deposit contract.
+ pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, + 3, 51, 6, 4, 158, 232, 82, 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, + 64, 213, 43, 52, 175, 154, 239, 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, + 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, 30, 63, 215, 238, 113, 60, + 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, 119, 88, 51, 80, 101, + 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, 187, 22, 95, 4, 211, + 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, 149, 250, 251, + 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, 18, 113, + 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + #[test] + fn can_parse_example_log() { + let log = Log { + block_number: 42, + data: EXAMPLE_LOG.to_vec(), + }; + log.to_deposit_log(&MainnetEthSpec::default_spec()) + .expect("should decode log"); + } + } +} + +/// Contains subset of the HTTP JSON-RPC methods used to query an execution node for +/// state of the deposit contract. +pub mod deposit_methods { + use super::Log; + use crate::{EngineApi, HttpJsonRpc}; + use serde::{Deserialize, Serialize}; + use serde_json::{json, Value}; + use std::fmt; + use std::ops::Range; + use std::str::FromStr; + use std::time::Duration; + use types::Hash256; + + /// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` + pub const DEPOSIT_EVENT_TOPIC: &str = + "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; + /// `keccak("get_deposit_root()")[0..4]` + pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; + /// `keccak("get_deposit_count()")[0..4]` + pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; + + /// Number of bytes in deposit contract deposit root response. + pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; + /// Number of bytes in deposit contract deposit root (value only). + pub const DEPOSIT_ROOT_BYTES: usize = 32; + + /// Represents an eth1 chain/network id. 
+    #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
+    pub enum Eth1Id {
+        Goerli,
+        Mainnet,
+        Custom(u64),
+    }
+
+    #[derive(Debug, PartialEq, Clone)]
+    pub struct Block {
+        pub hash: Hash256,
+        pub timestamp: u64,
+        pub number: u64,
+    }
+
+    /// Used to identify a block when querying the Eth1 node.
+    #[derive(Clone, Copy)]
+    pub enum BlockQuery {
+        Number(u64),
+        Latest,
+    }
+
+    impl Into<u64> for Eth1Id {
+        fn into(self) -> u64 {
+            match self {
+                Eth1Id::Mainnet => 1,
+                Eth1Id::Goerli => 5,
+                Eth1Id::Custom(id) => id,
+            }
+        }
+    }
+
+    impl From<u64> for Eth1Id {
+        fn from(id: u64) -> Self {
+            let into = |x: Eth1Id| -> u64 { x.into() };
+            match id {
+                id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet,
+                id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli,
+                id => Eth1Id::Custom(id),
+            }
+        }
+    }
+
+    impl FromStr for Eth1Id {
+        type Err = String;
+
+        fn from_str(s: &str) -> Result<Self, Self::Err> {
+            s.parse::<u64>()
+                .map(Into::into)
+                .map_err(|e| format!("Failed to parse eth1 network id {}", e))
+        }
+    }
+
+    /// Represents an error received from a remote procedure call.
+    #[derive(Debug, Serialize, Deserialize)]
+    pub enum RpcError {
+        NoResultField,
+        Eip155Error,
+        InvalidJson(String),
+        Error(String),
+    }
+
+    impl fmt::Display for RpcError {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            match self {
+                RpcError::NoResultField => write!(f, "No result field in response"),
+                RpcError::Eip155Error => write!(f, "Not synced past EIP-155"),
+                RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e),
+                RpcError::Error(s) => write!(f, "{}", s),
+            }
+        }
+    }
+
+    impl From<RpcError> for String {
+        fn from(e: RpcError) -> String {
+            e.to_string()
+        }
+    }
+
+    /// Parses a `0x`-prefixed, **big-endian** hex string as a u64.
+    ///
+    /// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian.
+    /// Therefore, this function is only useful for numbers encoded by the JSON RPC.
+    ///
+    /// E.g., `0x01 == 1`
+    fn hex_to_u64_be(hex: &str) -> Result<u64, String> {
+        u64::from_str_radix(strip_prefix(hex)?, 16)
+            .map_err(|e| format!("Failed to parse hex as u64: {:?}", e))
+    }
+
+    /// Parses a `0x`-prefixed, big-endian hex string as bytes.
+    ///
+    /// E.g., `0x0102 == vec![1, 2]`
+    fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, String> {
+        hex::decode(strip_prefix(hex)?)
+            .map_err(|e| format!("Failed to parse hex as bytes: {:?}", e))
+    }
+
+    /// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present.
+    fn strip_prefix(hex: &str) -> Result<&str, String> {
+        if let Some(stripped) = hex.strip_prefix("0x") {
+            Ok(stripped)
+        } else {
+            Err("Hex string did not start with `0x`".to_string())
+        }
+    }
+
+    impl HttpJsonRpc {
+        /// Get the eth1 chain id of the given endpoint.
+        pub async fn get_chain_id(&self, timeout: Duration) -> Result<Eth1Id, String> {
+            let chain_id: String = self
+                .rpc_request("eth_chainId", json!([]), timeout)
+                .await
+                .map_err(|e| format!("eth_chainId call failed {:?}", e))?;
+            hex_to_u64_be(chain_id.as_str()).map(|id| id.into())
+        }
+
+        /// Returns the current block number.
+        pub async fn get_block_number(&self, timeout: Duration) -> Result<u64, String> {
+            let response: String = self
+                .rpc_request("eth_blockNumber", json!([]), timeout)
+                .await
+                .map_err(|e| format!("eth_blockNumber call failed {:?}", e))?;
+            hex_to_u64_be(response.as_str())
+                .map_err(|e| format!("Failed to get block number: {}", e))
+        }
+
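As the doc comment on `hex_to_u64_be` notes, JSON-RPC quantities are big-endian hex strings while the deposit contract ABI-encodes counts as little-endian bytes. A quick, std-only illustration of the difference (assumed example values):

```rust
fn main() {
    // Big-endian JSON-RPC quantity: "0x10" == 16.
    let rpc_quantity = "0x10";
    let be = u64::from_str_radix(rpc_quantity.trim_start_matches("0x"), 16).unwrap();
    assert_eq!(be, 16);

    // Little-endian deposit-contract count: the same value as raw bytes.
    let le_bytes = [16u8, 0, 0, 0, 0, 0, 0, 0];
    assert_eq!(u64::from_le_bytes(le_bytes), 16);
}
```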
+        /// Gets a block hash by block number.
+        pub async fn get_block(
+            &self,
+            query: BlockQuery,
+            timeout: Duration,
+        ) -> Result<Block, String> {
+            let query_param = match query {
+                BlockQuery::Number(block_number) => format!("0x{:x}", block_number),
+                BlockQuery::Latest => "latest".to_string(),
+            };
+            let params = json!([
+                query_param,
+                false // do not return full tx objects.
+            ]);
+
+            let response: Value = self
+                .rpc_request("eth_getBlockByNumber", params, timeout)
+                .await
+                .map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?;
+
+            let hash: Vec<u8> = hex_to_bytes(
+                response
+                    .get("hash")
+                    .ok_or("No hash for block")?
+                    .as_str()
+                    .ok_or("Block hash was not string")?,
+            )?;
+            let hash: Hash256 = if hash.len() == 32 {
+                Hash256::from_slice(&hash)
+            } else {
+                return Err(format!("Block hash was not 32 bytes: {:?}", hash));
+            };
+
+            let timestamp = hex_to_u64_be(
+                response
+                    .get("timestamp")
+                    .ok_or("No timestamp for block")?
+                    .as_str()
+                    .ok_or("Block timestamp was not string")?,
+            )?;
+
+            let number = hex_to_u64_be(
+                response
+                    .get("number")
+                    .ok_or("No number for block")?
+                    .as_str()
+                    .ok_or("Block number was not string")?,
+            )?;
+
+            if number <= usize::max_value() as u64 {
+                Ok(Block {
+                    hash,
+                    timestamp,
+                    number,
+                })
+            } else {
+                Err(format!("Block number {} is larger than a usize", number))
+            }
+            .map_err(|e| format!("Failed to get block number: {}", e))
+        }
+
+        /// Returns the value of the `get_deposit_count()` call at the given `address` for the given
+        /// `block_number`.
+        ///
+        /// Assumes that the `address` has the same ABI as the eth2 deposit contract.
+        pub async fn get_deposit_count(
+            &self,
+            address: &str,
+            block_number: u64,
+            timeout: Duration,
+        ) -> Result<Option<u64>, String> {
+            let result = self
+                .call(address, DEPOSIT_COUNT_FN_SIGNATURE, block_number, timeout)
+                .await?;
+            match result {
+                None => Err("Deposit count response was none".to_string()),
+                Some(bytes) => {
+                    if bytes.is_empty() {
+                        Ok(None)
+                    } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES {
+                        let mut array = [0; 8];
+                        array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]);
+                        Ok(Some(u64::from_le_bytes(array)))
+                    } else {
+                        Err(format!(
+                            "Deposit count response was not {} bytes: {:?}",
+                            DEPOSIT_COUNT_RESPONSE_BYTES, bytes
+                        ))
+                    }
+                }
+            }
+        }
+
+        /// Returns the value of the `get_deposit_root()` call at the given `block_number`.
+        ///
+        /// Assumes that the `address` has the same ABI as the eth2 deposit contract.
+        pub async fn get_deposit_root(
+            &self,
+            address: &str,
+            block_number: u64,
+            timeout: Duration,
+        ) -> Result<Option<Hash256>, String> {
+            let result = self
+                .call(address, DEPOSIT_ROOT_FN_SIGNATURE, block_number, timeout)
+                .await?;
+            match result {
+                None => Err("Deposit root response was none".to_string()),
+                Some(bytes) => {
+                    if bytes.is_empty() {
+                        Ok(None)
+                    } else if bytes.len() == DEPOSIT_ROOT_BYTES {
+                        Ok(Some(Hash256::from_slice(&bytes)))
+                    } else {
+                        Err(format!(
+                            "Deposit root response was not {} bytes: {:?}",
+                            DEPOSIT_ROOT_BYTES, bytes
+                        ))
+                    }
+                }
+            }
+        }
+
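The `get_deposit_count` decoder above relies on the fixed ABI envelope returned by `eth_call`: two 32-byte head words (offset and length), then the 8-byte little-endian count padded to a word, 96 bytes in total. A rough sketch of just that decoding step, with the offsets taken from the constants in this hunk (this is illustrative, not a general-purpose ABI decoder):

```rust
fn decode_deposit_count(response: &[u8]) -> Result<u64, String> {
    const RESPONSE_BYTES: usize = 96;
    if response.len() != RESPONSE_BYTES {
        return Err(format!(
            "expected {} bytes, got {}",
            RESPONSE_BYTES,
            response.len()
        ));
    }
    let mut count = [0u8; 8];
    // Skip the two 32-byte ABI head words; the count is the next 8 bytes.
    count.copy_from_slice(&response[64..72]);
    Ok(u64::from_le_bytes(count))
}

fn main() {
    let mut resp = vec![0u8; 96];
    resp[64] = 7; // little-endian 7
    assert_eq!(decode_deposit_count(&resp), Ok(7));
}
```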
+        /// Performs an instant, no-transaction call to the contract `address` with the given `0x`-prefixed
+        /// `hex_data`.
+        ///
+        /// Returns bytes, if any.
+        async fn call(
+            &self,
+            address: &str,
+            hex_data: &str,
+            block_number: u64,
+            timeout: Duration,
+        ) -> Result<Option<Vec<u8>>, String> {
+            let params = json!([
+                {
+                    "to": address,
+                    "data": hex_data,
+                },
+                format!("0x{:x}", block_number)
+            ]);
+
+            let response: Option<String> = self
+                .rpc_request("eth_call", params, timeout)
+                .await
+                .map_err(|e| format!("eth_call call failed {:?}", e))?;
+
+            response.map(|s| hex_to_bytes(&s)).transpose()
+        }
+
+        /// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given
+        /// `block_height_range`.
+        ///
+        /// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not.
+        pub async fn get_deposit_logs_in_range(
+            &self,
+            address: &str,
+            block_height_range: Range<u64>,
+            timeout: Duration,
+        ) -> Result<Vec<Log>, String> {
+            let params = json!([{
+                "address": address,
+                "topics": [DEPOSIT_EVENT_TOPIC],
+                "fromBlock": format!("0x{:x}", block_height_range.start),
+                "toBlock": format!("0x{:x}", block_height_range.end),
+            }]);
+
+            let response: Value = self
+                .rpc_request("eth_getLogs", params, timeout)
+                .await
+                .map_err(|e| format!("eth_getLogs call failed {:?}", e))?;
+            response
+                .as_array()
+                .cloned()
+                .ok_or("'result' value was not an array")?
+                .into_iter()
+                .map(|value| {
+                    let block_number = value
+                        .get("blockNumber")
+                        .ok_or("No block number field in log")?
+                        .as_str()
+                        .ok_or("Block number was not string")?;
+
+                    let data = value
+                        .get("data")
+                        .ok_or("No data field in log")?
+                        .as_str()
+                        .ok_or("Data was not string")?;
+
+                    Ok(Log {
+                        block_number: hex_to_u64_be(block_number)?,
+                        data: hex_to_bytes(data)?,
+                    })
+                })
+                .collect::<Result<Vec<Log>, String>>()
+                .map_err(|e| format!("Failed to get logs in range: {}", e))
+        }
+    }
+}
+
 pub struct HttpJsonRpc {
     pub client: Client,
     pub url: SensitiveUrl,
@@ -117,6 +592,12 @@ impl HttpJsonRpc {
     }
 }

+impl std::fmt::Display for HttpJsonRpc {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}, auth={}", self.url, self.auth.is_some())
+    }
+}
+
 impl HttpJsonRpc {
     pub async fn upcheck(&self) -> Result<(), Error> {
         let result: serde_json::Value = self
@@ -289,6 +770,7 @@ impl HttpJsonRpc {
             Ok(response.into())
         }
     }
+
 #[cfg(test)]
 mod test {
     use super::auth::JwtKey;
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs
index cff21902722..4b298876756 100644
--- a/beacon_node/execution_layer/src/lib.rs
+++ b/beacon_node/execution_layer/src/lib.rs
@@ -6,10 +6,10 @@

 use crate::engine_api::Builder;
 use crate::engines::Builders;
-use auth::{Auth, JwtKey};
+use auth::{strip_prefix, Auth, JwtKey};
 use engine_api::Error as ApiError;
 pub use engine_api::*;
-pub use engine_api::{http, http::HttpJsonRpc};
+pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
 pub use engines::ForkChoiceState;
 use engines::{Engine, EngineError, Engines, Logging};
 use lru::LruCache;
@@ -42,6 +42,9 @@ mod metrics;
 mod payload_status;
 pub mod test_utils;

+/// Indicates the default jwt authenticated execution endpoint.
+pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/";
+
 /// Name for the default file used for the jwt secret.
 pub const DEFAULT_JWT_FILE: &str = "jwt.hex";

@@ -130,14 +133,6 @@ pub struct Config {
     pub default_datadir: PathBuf,
 }

-fn strip_prefix(s: &str) -> &str {
-    if let Some(stripped) = s.strip_prefix("0x") {
-        stripped
-    } else {
-        s
-    }
-}
-
 /// Provides access to one or more execution engines and provides a neat interface for consumption
 /// by the `BeaconChain`.
/// diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index aac13a324fc..089f79aa113 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -112,7 +112,7 @@ impl Eth1GenesisService { "Importing eth1 deposit logs"; ); - let endpoints = eth1_service.init_endpoints(); + let endpoints = eth1_service.init_endpoints()?; loop { let update_result = eth1_service diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index ccf8fe10c9d..1233d99fd31 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -3,6 +3,7 @@ mod eth1_genesis_service; mod interop; pub use eth1::Config as Eth1Config; +pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 8b77c894717..74a054fcc04 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -4,7 +4,7 @@ //! dir in the root of the `lighthouse` repo. #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; -use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; @@ -29,7 +29,7 @@ fn basic() { let mut spec = env.eth2_config().spec.clone(); env.runtime().block_on(async { - let eth1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()) + let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()) .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; @@ -44,7 +44,10 @@ fn basic() { let service = Eth1GenesisService::new( Eth1Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 3102018e3e2..a0cc124d476 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -409,45 +409,46 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("merge") .long("merge") - .help("Enable the features necessary to run merge testnets. This feature \ - is unstable and is for developers only.") - .takes_value(false), + .help("Deprecated. The feature activates automatically when --execution-endpoint \ + is supplied.") + .takes_value(false) ) .arg( - Arg::with_name("execution-endpoints") - .long("execution-endpoints") - .value_name("EXECUTION-ENDPOINTS") - .help("One or more comma-delimited server endpoints for HTTP JSON-RPC connection. \ - If multiple endpoints are given the endpoints are used as fallback in the \ - given order. Also enables the --merge flag. \ - If this flag is omitted and the --eth1-endpoints is supplied, those values \ - will be used. Defaults to http://127.0.0.1:8545.") + Arg::with_name("execution-endpoint") + .long("execution-endpoint") + .value_name("EXECUTION-ENDPOINT") + .alias("execution-endpoints") + .help("Server endpoint for an execution layer jwt authenticated HTTP \ + JSON-RPC connection. 
Uses the same endpoint to populate the \
+                    deposit cache. Also enables the --merge flag. \
+                    If not provided, uses the default value of http://127.0.0.1:8551")
                 .takes_value(true)
+                .requires("execution-jwt")
         )
         .arg(
-            Arg::with_name("jwt-secrets")
-                .long("jwt-secrets")
-                .value_name("JWT-SECRETS")
-                .help("One or more comma-delimited file paths which contain the corresponding hex-encoded \
-                    JWT secrets for each execution endpoint provided in the --execution-endpoints flag. \
-                    The number of paths should be in the same order and strictly equal to the number \
-                    of execution endpoints provided.")
+            Arg::with_name("execution-jwt")
+                .long("execution-jwt")
+                .value_name("EXECUTION-JWT")
+                .alias("jwt-secrets")
+                .help("File path which contains the hex-encoded JWT secret for the \
+                    execution endpoint provided in the --execution-endpoint flag.")
                 .takes_value(true)
-                .requires("execution-endpoints")
         )
         .arg(
-            Arg::with_name("jwt-id")
-                .long("jwt-id")
-                .value_name("JWT-ID")
+            Arg::with_name("execution-jwt-id")
+                .long("execution-jwt-id")
+                .value_name("EXECUTION-JWT-ID")
+                .alias("jwt-id")
                .help("Used by the beacon node to communicate a unique identifier to execution nodes \
                    during JWT authentication. It corresponds to the 'id' field in the JWT claims object. \
                    Set to empty by default.")
                .takes_value(true)
        )
        .arg(
-            Arg::with_name("jwt-version")
-                .long("jwt-version")
-                .value_name("JWT-VERSION")
+            Arg::with_name("execution-jwt-version")
+                .long("execution-jwt-version")
+                .value_name("EXECUTION-JWT-VERSION")
+                .alias("jwt-version")
                .help("Used by the beacon node to communicate a client version to execution nodes \
                    during JWT authentication. It corresponds to the 'clv' field in the JWT claims object. \
                    Set to empty by default.")
                .takes_value(true)
        )
        .arg(
@@ -461,14 +462,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                    collected from any blocks produced by this node. Defaults to a junk \
                    address whilst the merge is in development stages. 
THE DEFAULT VALUE \ WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") - .requires("merge") + .requires("execution-endpoint") .takes_value(true) ) .arg( - Arg::with_name("payload-builders") - .long("payload-builders") + Arg::with_name("payload-builder") + .long("payload-builder") + .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") - .requires("merge") + .requires("execution-endpoint") .takes_value(true) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index db765100c3a..0421df34290 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,12 +3,14 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; +use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; +use std::fmt::Debug; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; @@ -215,15 +217,18 @@ pub fn get_config( "msg" => "please use --eth1-endpoints instead" ); client_config.sync_eth1_chain = true; - client_config.eth1.endpoints = vec![SensitiveUrl::parse(endpoint) + + let endpoints = vec![SensitiveUrl::parse(endpoint) .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?]; + client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); } else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") { client_config.sync_eth1_chain = true; - client_config.eth1.endpoints = endpoints + let endpoints = endpoints .split(',') .map(SensitiveUrl::parse) .collect::>() .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; + client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); } if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { @@ -242,47 +247,79 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if cli_args.is_present("merge") || cli_args.is_present("execution-endpoints") { - let mut el_config = execution_layer::Config::default(); - - if let Some(endpoints) = cli_args.value_of("execution-endpoints") { - client_config.sync_eth1_chain = true; - el_config.execution_endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() - .map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?; - } else if cli_args.is_present("merge") { - el_config.execution_endpoints = client_config.eth1.endpoints.clone(); + if cli_args.is_present("merge") { + if cli_args.is_present("execution-endpoint") { + warn!( + log, + "The --merge flag is deprecated"; + "info" => "the --execution-endpoint flag automatically enables this feature" + ) + } else { + return Err("The --merge flag is deprecated. \ + Supply a value to --execution-endpoint instead." 
+ .into()); } + } - if let Some(endpoints) = cli_args.value_of("payload-builders") { - el_config.builder_endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() - .map_err(|e| format!("payload-builders contains an invalid URL {:?}", e))?; - } + if let Some(endpoints) = cli_args.value_of("execution-endpoint") { + let mut el_config = execution_layer::Config::default(); - if let Some(secrets) = cli_args.value_of("jwt-secrets") { - let secret_files: Vec<_> = secrets.split(',').map(PathBuf::from).collect(); - if !secret_files.is_empty() && secret_files.len() != el_config.execution_endpoints.len() - { - return Err(format!( - "{} execution-endpoints supplied with {} jwt-secrets. Lengths \ - must match or jwt-secrets must be empty.", - el_config.execution_endpoints.len(), - secret_files.len(), - )); - } - el_config.secret_files = secret_files; + // Always follow the deposit contract when there is an execution endpoint. + // + // This is wasteful for non-staking nodes as they have no need to process deposit contract + // logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or + // `--staking` flags, however that poses a risk to stakers since they cannot produce blocks + // without "eth1". + // + // The waste for non-staking nodes is relatively small so we err on the side of safety for + // stakers. The merge is already complicated enough. + client_config.sync_eth1_chain = true; + + // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. + let execution_endpoint = + parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; + + // Parse a single JWT secret, logging warnings if multiple are supplied. + // + // JWTs are required if `--execution-endpoint` is supplied. + let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?; + let secret_file = + parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; + + // Parse and set the payload builder, if any. + if let Some(endpoints) = cli_args.value_of("payload-builder") { + let payload_builder = + parse_only_one_value(endpoints, SensitiveUrl::parse, "--payload-builder", log)?; + el_config.builder_endpoints = vec![payload_builder]; } + // Set config values from parse values. + el_config.secret_files = vec![secret_file.clone()]; + el_config.execution_endpoints = vec![execution_endpoint.clone()]; el_config.suggested_fee_recipient = clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; - el_config.jwt_id = clap_utils::parse_optional(cli_args, "jwt-id")?; - el_config.jwt_version = clap_utils::parse_optional(cli_args, "jwt-version")?; + el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; + el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; el_config.default_datadir = client_config.data_dir.clone(); + + // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and + // use `--execution-endpoint` instead. Also, log a deprecation warning. + if cli_args.is_present("eth1-endpoints") || cli_args.is_present("eth1-endpoint") { + warn!( + log, + "Ignoring --eth1-endpoints flag"; + "info" => "the value for --execution-endpoint will be used instead. 
\
+                --eth1-endpoints has been deprecated for post-merge configurations"
+            );
+        }
+        client_config.eth1.endpoints = Eth1Endpoint::Auth {
+            endpoint: execution_endpoint,
+            jwt_path: secret_file,
+            jwt_id: el_config.jwt_id.clone(),
+            jwt_version: el_config.jwt_version.clone(),
+        };
+
+        // Store the EL config in the client config.
         client_config.execution_layer = Some(el_config);
     }

@@ -344,7 +381,6 @@ pub fn get_config<E: EthSpec>(
     client_config.eth1.follow_distance = spec.eth1_follow_distance;
     client_config.eth1.node_far_behind_seconds =
         max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block;
-    client_config.eth1.network_id = spec.deposit_network_id.into();
     client_config.eth1.chain_id = spec.deposit_chain_id.into();
     client_config.eth1.set_block_cache_truncation::<E>(spec);

@@ -844,3 +880,38 @@ pub fn get_slots_per_restore_point<E: EthSpec>(
         Ok((default, false))
     }
 }
+
+/// Parses the `cli_value` as a comma-separated string of values to be parsed with `parser`.
+///
+/// If there is more than one value, log a warning. If there are no values, return an error.
+pub fn parse_only_one_value<F, T, E>(
+    cli_value: &str,
+    parser: F,
+    flag_name: &str,
+    log: &Logger,
+) -> Result<T, String>
+where
+    F: Fn(&str) -> Result<T, E>,
+    E: Debug,
+{
+    let values = cli_value
+        .split(',')
+        .map(parser)
+        .collect::<Result<Vec<_>, _>>()
+        .map_err(|e| format!("{} contains an invalid value {:?}", flag_name, e))?;
+
+    if values.len() > 1 {
+        warn!(
+            log,
+            "Multiple values provided";
+            "info" => "multiple values are deprecated, only the first value will be used",
+            "count" => values.len(),
+            "flag" => flag_name
+        );
+    }
+
+    values
+        .into_iter()
+        .next()
+        .ok_or(format!("Must provide at least one value to {}", flag_name))
+}
diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs
index 689107228e8..10462419532 100644
--- a/lcli/src/eth1_genesis.rs
+++ b/lcli/src/eth1_genesis.rs
@@ -1,7 +1,7 @@
 use clap::ArgMatches;
 use environment::Environment;
 use eth2_network_config::Eth2NetworkConfig;
-use genesis::{Eth1Config, Eth1GenesisService};
+use genesis::{Eth1Config, Eth1Endpoint, Eth1GenesisService};
 use sensitive_url::SensitiveUrl;
 use ssz::Encode;
 use std::cmp::max;
@@ -35,11 +35,12 @@ pub fn run<T: EthSpec>(
     let mut config = Eth1Config::default();
     if let Some(v) = endpoints.clone() {
-        config.endpoints = v
+        let endpoints = v
             .iter()
             .map(|s| SensitiveUrl::parse(s))
             .collect::<Result<_, _>>()
             .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?;
+        config.endpoints = Eth1Endpoint::NoAuth(endpoints);
     }
     config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address);
     config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block;
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml
index d9bd4334cfc..f7742ef0b91 100644
--- a/lighthouse/Cargo.toml
+++ b/lighthouse/Cargo.toml
@@ -55,6 +55,7 @@ validator_dir = { path = "../common/validator_dir" }
 slashing_protection = { path = "../validator_client/slashing_protection" }
 lighthouse_network = { path = "../beacon_node/lighthouse_network" }
 sensitive_url = { path = "../common/sensitive_url" }
+eth1 = { path = "../beacon_node/eth1" }

 [[test]]
 name = "lighthouse_tests"
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index effccbbd662..443c442027e 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -1,6 +1,7 @@
 use beacon_node::ClientConfig as Config;

 use crate::exec::{CommandLineTestExec, CompletedTest};
+use eth1::Eth1Endpoint;
 use lighthouse_network::PeerId;
 use std::fs::File;
 use std::io::Write;
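To make the semantics of `parse_only_one_value` concrete, here is a condensed, dependency-free illustration of its behaviour: comma-separated input, a warning (here just `eprintln!`) when extras are supplied, and an error on empty input. The real function logs via `slog` and is generic over the parser's error type.

```rust
fn only_one(cli_value: &str, flag: &str) -> Result<String, String> {
    let values: Vec<&str> = cli_value.split(',').filter(|s| !s.is_empty()).collect();
    if values.len() > 1 {
        eprintln!("{}: multiple values are deprecated, using the first", flag);
    }
    values
        .first()
        .map(|s| s.to_string())
        .ok_or(format!("Must provide at least one value to {}", flag))
}

fn main() {
    assert_eq!(
        only_one("http://a:8551,http://b:8551", "--execution-endpoint").unwrap(),
        "http://a:8551"
    );
    assert!(only_one("", "--execution-endpoint").is_err());
}
```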
@@ -66,7 +67,10 @@ fn staking_flag() {
         .with_config(|config| {
             assert!(config.http_api.enabled);
             assert!(config.sync_eth1_chain);
-            assert_eq!(config.eth1.endpoints[0].to_string(), DEFAULT_ETH1_ENDPOINT);
+            assert_eq!(
+                config.eth1.endpoints.get_endpoints()[0].to_string(),
+                DEFAULT_ETH1_ENDPOINT
+            );
         });
 }

@@ -196,18 +200,21 @@ fn eth1_endpoints_flag() {
         .run_with_zero_port()
         .with_config(|config| {
             assert_eq!(
-                config.eth1.endpoints[0].full.to_string(),
+                config.eth1.endpoints.get_endpoints()[0].full.to_string(),
                 "http://localhost:9545/"
             );
             assert_eq!(
-                config.eth1.endpoints[0].to_string(),
+                config.eth1.endpoints.get_endpoints()[0].to_string(),
                 "http://localhost:9545/"
             );
             assert_eq!(
-                config.eth1.endpoints[1].full.to_string(),
+                config.eth1.endpoints.get_endpoints()[1].full.to_string(),
                 "https://infura.io/secret"
             );
-            assert_eq!(config.eth1.endpoints[1].to_string(), "https://infura.io/");
+            assert_eq!(
+                config.eth1.endpoints.get_endpoints()[1].to_string(),
+                "https://infura.io/"
+            );
             assert!(config.sync_eth1_chain);
         });
 }
@@ -246,45 +253,107 @@ fn eth1_cache_follow_distance_manual() {
 }

 // Tests for Bellatrix flags.
-#[test]
-fn merge_flag() {
-    CommandLineTest::new()
-        .flag("merge", None)
-        .run_with_zero_port()
-        .with_config(|config| assert!(config.execution_layer.is_some()));
-}
-#[test]
-fn merge_execution_endpoints_flag() {
+fn run_merge_execution_endpoints_flag_test(flag: &str) {
     use sensitive_url::SensitiveUrl;
     let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"];
-    let endpoints = urls
-        .iter()
-        .map(|s| SensitiveUrl::parse(s).unwrap())
-        .collect::<Vec<_>>();
+    // We don't support redundancy for execution endpoints;
+    // only the first provided endpoint is parsed.
     let mut endpoint_arg = urls[0].to_string();
-    for url in urls.into_iter().skip(1) {
+    for url in urls.iter().skip(1) {
         endpoint_arg.push(',');
         endpoint_arg.push_str(url);
     }
+
+    let (_dirs, jwts): (Vec<_>, Vec<_>) = (0..2)
+        .map(|i| {
+            let dir = TempDir::new().expect("Unable to create temporary directory");
+            let path = dir.path().join(format!("jwt-{}", i));
+            (dir, path)
+        })
+        .unzip();
+
+    let mut jwts_arg = jwts[0].as_os_str().to_str().unwrap().to_string();
+    for jwt in jwts.iter().skip(1) {
+        jwts_arg.push(',');
+        jwts_arg.push_str(jwt.as_os_str().to_str().unwrap());
+    }
+
     // this is way better but intersperse is still a nightly feature :/
     // let endpoint_arg: String = urls.into_iter().intersperse(",").collect();
     CommandLineTest::new()
-        .flag("merge", None)
-        .flag("execution-endpoints", Some(&endpoint_arg))
+        .flag(flag, Some(&endpoint_arg))
+        .flag("execution-jwt", Some(&jwts_arg))
         .run_with_zero_port()
         .with_config(|config| {
             let config = config.execution_layer.as_ref().unwrap();
-            assert_eq!(config.execution_endpoints, endpoints)
+            assert_eq!(config.execution_endpoints.len(), 1);
+            assert_eq!(
+                config.execution_endpoints[0],
+                SensitiveUrl::parse(&urls[0]).unwrap()
+            );
+            // Only the first secret file should be used.
+ assert_eq!(config.secret_files, vec![jwts[0].clone()]); + }); +} +#[test] +fn merge_execution_endpoints_flag() { + run_merge_execution_endpoints_flag_test("execution-endpoints") +} +#[test] +fn merge_execution_endpoint_flag() { + run_merge_execution_endpoints_flag_test("execution-endpoint") +} +fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execution_flag: &str) { + use sensitive_url::SensitiveUrl; + + let eth1_endpoint = "http://bad.bad"; + let execution_endpoint = "http://good.good"; + + assert!(eth1_endpoint != execution_endpoint); + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let jwt_path = dir.path().join("jwt-file"); + + CommandLineTest::new() + .flag(eth1_flag, Some(ð1_endpoint)) + .flag(execution_flag, Some(&execution_endpoint)) + .flag("execution-jwt", jwt_path.as_os_str().to_str()) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.execution_layer.as_ref().unwrap().execution_endpoints, + vec![SensitiveUrl::parse(execution_endpoint).unwrap()] + ); + + // The eth1 endpoint should have been set to the --execution-endpoint value in defiance + // of --eth1-endpoints. + assert_eq!( + config.eth1.endpoints, + Eth1Endpoint::Auth { + endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), + jwt_path: jwt_path.clone(), + jwt_id: None, + jwt_version: None, + } + ); }); } #[test] +fn execution_endpoints_overrides_eth1_endpoints() { + run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoints", "execution-endpoints"); +} +#[test] +fn execution_endpoint_overrides_eth1_endpoint() { + run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoint", "execution-endpoint"); +} +#[test] fn merge_jwt_secrets_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") .expect("Unable to write to file"); CommandLineTest::new() - .flag("merge", None) .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "jwt-secrets", @@ -302,8 +371,13 @@ fn merge_jwt_secrets_flag() { } #[test] fn merge_fee_recipient_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() - .flag("merge", None) + .flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) .flag( "suggested-fee-recipient", Some("0x00000000219ab540356cbb839cbe05303d7705fa"), @@ -317,19 +391,74 @@ fn merge_fee_recipient_flag() { ); }); } -#[test] -fn jwt_optional_flags() { +fn run_payload_builder_flag_test(flag: &str, builders: &str) { + use sensitive_url::SensitiveUrl; + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let all_builders: Vec<_> = builders + .split(",") + .map(|builder| SensitiveUrl::parse(builder).expect("valid builder url")) + .collect(); CommandLineTest::new() - .flag("merge", None) - .flag("jwt-id", Some("bn-1")) - .flag("jwt-version", Some("Lighthouse-v2.1.3")) + .flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .flag(flag, Some(builders)) .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.jwt_id, Some("bn-1".to_string())); - assert_eq!(config.jwt_version, Some("Lighthouse-v2.1.3".to_string())); + // Only first 
provided endpoint is parsed as we don't support + // redundancy. + assert_eq!(&config.builder_endpoints, &all_builders[..1]); }); } + +#[test] +fn payload_builder_flags() { + run_payload_builder_flag_test("payload-builder", "http://meow.cats"); + run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); + run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); +} + +fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { + use sensitive_url::SensitiveUrl; + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let execution_endpoint = "http://meow.cats"; + let jwt_file = "jwt-file"; + let id = "bn-1"; + let version = "Lighthouse-v2.1.3"; + CommandLineTest::new() + .flag("execution-endpoint", Some(execution_endpoint.clone())) + .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) + .flag(jwt_id_flag, Some(id)) + .flag(jwt_version_flag, Some(version)) + .run_with_zero_port() + .with_config(|config| { + let el_config = config.execution_layer.as_ref().unwrap(); + assert_eq!(el_config.jwt_id, Some(id.to_string())); + assert_eq!(el_config.jwt_version, Some(version.to_string())); + assert_eq!( + config.eth1.endpoints, + Eth1Endpoint::Auth { + endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), + jwt_path: dir.path().join(jwt_file), + jwt_id: Some(id.to_string()), + jwt_version: Some(version.to_string()), + } + ); + }); +} +#[test] +fn jwt_optional_flags() { + run_jwt_optional_flags_test("execution-jwt", "execution-jwt-id", "execution-jwt-version"); +} +#[test] +fn jwt_optional_alias_flags() { + run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); +} #[test] fn terminal_total_difficulty_override_flag() { use beacon_node::beacon_chain::types::Uint256; diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh index 7d97f2196a8..a489c332243 100755 --- a/scripts/local_testnet/ganache_test_node.sh +++ b/scripts/local_testnet/ganache_test_node.sh @@ -11,5 +11,4 @@ exec ganache \ --mnemonic "$ETH1_NETWORK_MNEMONIC" \ --port 8545 \ --blockTime $SECONDS_PER_ETH1_BLOCK \ - --networkId "$NETWORK_ID" \ - --chain.chainId "$NETWORK_ID" + --chain.chainId "$CHAIN_ID" diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 6f0b070915a..a1348363a9b 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -32,7 +32,7 @@ lcli \ --genesis-delay $GENESIS_DELAY \ --genesis-fork-version $GENESIS_FORK_VERSION \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ - --eth1-id $NETWORK_ID \ + --eth1-id $CHAIN_ID \ --eth1-follow-distance 1 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 208fbb6d856..efb1046452c 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -30,7 +30,7 @@ GENESIS_DELAY=0 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 6cc0dd3b8a0..d51fe2aef2e 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -30,7 +30,7 @@ GENESIS_DELAY=0 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 diff --git 
a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index c82277dc75d..9b6a33ff599 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -16,17 +16,11 @@ pub struct GanacheInstance { pub port: u16, child: Child, pub web3: Web3, - network_id: u64, chain_id: u64, } impl GanacheInstance { - fn new_from_child( - mut child: Child, - port: u16, - network_id: u64, - chain_id: u64, - ) -> Result { + fn new_from_child(mut child: Child, port: u16, chain_id: u64) -> Result { let stdout = child .stdout .ok_or("Unable to get stdout for ganache child process")?; @@ -64,14 +58,13 @@ impl GanacheInstance { port, child, web3, - network_id, chain_id, }) } /// Start a new `ganache` process, waiting until it indicates that it is ready to accept /// RPC connections. - pub fn new(network_id: u64, chain_id: u64) -> Result { + pub fn new(chain_id: u64) -> Result { let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache.cmd", @@ -89,8 +82,6 @@ impl GanacheInstance { .arg(format!("{}", port)) .arg("--mnemonic") .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") - .arg("--networkId") - .arg(format!("{}", network_id)) .arg("--chain.chainId") .arg(format!("{}", chain_id)) .spawn() @@ -102,7 +93,7 @@ impl GanacheInstance { ) })?; - Self::new_from_child(child, port, network_id, chain_id) + Self::new_from_child(child, port, chain_id) } pub fn fork(&self) -> Result { @@ -128,7 +119,7 @@ impl GanacheInstance { ) })?; - Self::new_from_child(child, port, self.network_id, self.chain_id) + Self::new_from_child(child, port, self.chain_id) } /// Returns the endpoint that this instance is listening on. @@ -136,11 +127,6 @@ impl GanacheInstance { endpoint(self.port) } - /// Returns the network id of the ganache instance - pub fn network_id(&self) -> u64 { - self.network_id - } - /// Returns the chain id of the ganache instance pub fn chain_id(&self) -> u64 { self.chain_id diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 52ae3922bc2..42081a60e74 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -30,8 +30,8 @@ pub struct GanacheEth1Instance { } impl GanacheEth1Instance { - pub async fn new(network_id: u64, chain_id: u64) -> Result { - let ganache = GanacheInstance::new(network_id, chain_id)?; + pub async fn new(chain_id: u64) -> Result { + let ganache = GanacheInstance::new(chain_id)?; DepositContract::deploy(ganache.web3.clone(), 0, None) .await .map(|deposit_contract| Self { diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 6770508435a..a01c133fd92 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" [dependencies] node_test_rig = { path = "../node_test_rig" } eth1 = {path = "../../beacon_node/eth1"} +execution_layer = {path = "../../beacon_node/execution_layer"} types = { path = "../../consensus/types" } parking_lot = "0.12.0" futures = "0.3.7" diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 80fc755d52b..4c773c70bf6 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,9 +1,10 @@ use crate::local_network::INVALID_ADDRESS; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; -use eth1::http::Eth1Id; -use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::GanacheEth1Instance; + +use 
execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, @@ -92,10 +93,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit * validators. */ - let ganache_eth1_instance = - GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await?; + let ganache_eth1_instance = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; let deposit_contract = ganache_eth1_instance.deposit_contract; - let network_id = ganache_eth1_instance.ganache.network_id(); let chain_id = ganache_eth1_instance.ganache.chain_id(); let ganache = ganache_eth1_instance.ganache; let eth1_endpoint = SensitiveUrl::parse(ganache.endpoint().as_str()) @@ -124,7 +123,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut beacon_config = testing_client_config(); beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoints = vec![eth1_endpoint]; + beacon_config.eth1.endpoints = Eth1Endpoint::NoAuth(vec![eth1_endpoint]); beacon_config.eth1.deposit_contract_address = deposit_contract_address; beacon_config.eth1.deposit_contract_deploy_block = 0; beacon_config.eth1.lowest_cached_block_number = 0; @@ -133,7 +132,6 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.dummy_eth1_backend = false; beacon_config.sync_eth1_chain = true; beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; - beacon_config.eth1.network_id = Eth1Id::from(network_id); beacon_config.eth1.chain_id = Eth1Id::from(chain_id); beacon_config.network.target_peers = node_count - 1; @@ -150,10 +148,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { for i in 0..node_count - 1 { let mut config = beacon_config.clone(); if i % 2 == 0 { - config.eth1.endpoints.insert( - 0, - SensitiveUrl::parse(INVALID_ADDRESS).expect("Unable to parse invalid address"), - ); + if let Eth1Endpoint::NoAuth(endpoints) = &mut config.eth1.endpoints { + endpoints.insert( + 0, + SensitiveUrl::parse(INVALID_ADDRESS) + .expect("Unable to parse invalid address"), + ) + } } network.add_beacon_node(config).await?; } From f6ec44f0dd38ff86309ace4a4e246c0ea42f4e86 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Thu, 30 Jun 2022 00:49:21 +0000 Subject: [PATCH 06/15] Register validator api (#3194) ## Issue Addressed Lays the groundwork for builder API changes by implementing the beacon-API's new `register_validator` endpoint ## Proposed Changes - Add a routine in the VC that runs on startup (re-try until success), once per epoch or whenever `suggested_fee_recipient` is updated, signing `ValidatorRegistrationData` and sending it to the BN. - TODO: `gas_limit` config options https://github.com/ethereum/builder-specs/issues/17 - BN only sends VC registration data to builders on demand, but VC registration data *does update* the BN's prepare proposer cache and send an updated fcU to a local EE. This is necessary for fee recipient consistency between the blinded and full block flow in the event of fallback. Having the BN only send registration data to builders on demand gives feedback directly to the VC about relay status. Also, since the BN has no ability to sign these messages anyways (so couldn't refresh them if it wanted), and validator registration is independent of the BN head, I think this approach makes sense. 
- Adds upcoming consensus spec changes for this PR https://github.com/ethereum/consensus-specs/pull/2884 - I initially applied the bit mask based on a configured application domain... but I ended up just hard coding it here instead because that's how it's spec'd in the builder repo. - Should the application mask appear in the API? Co-authored-by: realbigsean --- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 86 ++++++- beacon_node/http_api/tests/tests.rs | 76 ++++++ book/src/api-vc-endpoints.md | 1 + common/eth2/src/lib.rs | 17 ++ consensus/types/src/application_domain.rs | 16 ++ consensus/types/src/chain_spec.rs | 50 ++++ consensus/types/src/config_and_preset.rs | 4 + consensus/types/src/lib.rs | 3 + .../types/src/validator_registration_data.rs | 23 ++ testing/web3signer_tests/src/lib.rs | 33 +++ validator_client/src/http_metrics/metrics.rs | 5 + validator_client/src/lib.rs | 5 +- validator_client/src/preparation_service.rs | 232 +++++++++++++++++- validator_client/src/signing_method.rs | 39 ++- .../src/signing_method/web3signer.rs | 3 + validator_client/src/validator_store.rs | 35 ++- 18 files changed, 603 insertions(+), 27 deletions(-) create mode 100644 consensus/types/src/application_domain.rs create mode 100644 consensus/types/src/validator_registration_data.rs diff --git a/Cargo.lock b/Cargo.lock index 3dbe005658f..3bdce9138ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2530,6 +2530,7 @@ dependencies = [ "safe_arith", "sensitive_url", "serde", + "serde_json", "slog", "slot_clock", "state_processing", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 9dd2af7d179..07fb9923936 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -39,6 +39,7 @@ environment = { path = "../../lighthouse/environment" } tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } +serde_json = "1.0.58" [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 379033a1130..06dc9687648 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -49,8 +49,8 @@ use types::{ BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use version::{ add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, @@ -2408,12 +2408,10 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) - .and(warp::addr::remote()) .and(log_filter.clone()) .and(warp::body::json()) .and_then( |chain: Arc<BeaconChain<T>>, - client_addr: Option<SocketAddr>, log: Logger, preparation_data: Vec<ProposerPreparationData>| { blocking_json_task(move || { @@ -2430,9 +2428,6 @@ pub fn serve( log, "Received proposer preparation data"; "count" => preparation_data.len(), - "client" => client_addr - .map(|a| a.to_string()) - .unwrap_or_else(|| "unknown".to_string()), ); execution_layer @@ -2455,6 +2450,82 @@ pub fn serve( }, ); + // POST validator/register_validator + let post_validator_register_validator = eth1_v1 + .and(warp::path("validator")) + 
.and(warp::path("register_validator")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and(warp::body::json()) + .and_then( + |chain: Arc>, + log: Logger, + register_val_data: Vec| { + blocking_json_task(move || { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::beacon_chain_error)? + .epoch(T::EthSpec::slots_per_epoch()); + + debug!( + log, + "Received register validator request"; + "count" => register_val_data.len(), + ); + + let preparation_data = register_val_data + .iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .map(|validator_index| ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data.message.fee_recipient, + }) + }) + .collect::>(); + + debug!( + log, + "Resolved validator request pubkeys"; + "count" => preparation_data.len() + ); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation_blocking(current_epoch, &preparation_data) + .map_err(|_e| { + warp_utils::reject::custom_bad_request( + "error processing proposer preparations".to_string(), + ) + })?; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blined block + // flow failing. + chain.prepare_beacon_proposer_blocking().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder + + Ok(()) + }) + }, + ); // POST validator/sync_committee_subscriptions let post_validator_sync_committee_subscriptions = eth1_v1 .and(warp::path("validator")) @@ -3008,6 +3079,7 @@ pub fn serve( .or(post_validator_beacon_committee_subscriptions.boxed()) .or(post_validator_sync_committee_subscriptions.boxed()) .or(post_validator_prepare_beacon_proposer.boxed()) + .or(post_validator_register_validator.boxed()) .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5f53a961560..2b0cfd7c41b 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,6 +11,7 @@ use eth2::{ types::*, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; +use execution_layer::test_utils::MockExecutionLayer; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use lighthouse_network::{Enr, EnrExt, PeerId}; @@ -24,6 +25,7 @@ use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; +use types::application_domain::ApplicationDomain; use types::{ AggregateSignature, BeaconState, BitList, Domain, EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, @@ -64,6 +66,9 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, + // This is never directly accessed, but adding it creates a payload cache, which we use in tests here. 
+ #[allow(dead_code)] + mock_el: Option<MockExecutionLayer<E>>, _runtime: TestRuntime, } @@ -80,6 +85,7 @@ impl ApiTester { .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() + .mock_execution_layer() .build(); harness.advance_slot(); @@ -214,6 +220,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_el: harness.mock_execution_layer, _runtime: harness.runtime, } } @@ -293,6 +300,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_el: None, _runtime: harness.runtime, } } @@ -2226,6 +2234,66 @@ impl ApiTester { self } + pub async fn test_post_validator_register_validator(self) -> Self { + let mut registrations = vec![]; + let mut fee_recipients = vec![]; + + let fork = self.chain.head().unwrap().beacon_state.fork(); + + for (val_index, keypair) in self.validator_keypairs.iter().enumerate() { + let pubkey = keypair.pk.compress(); + let fee_recipient = Address::from_low_u64_be(val_index as u64); + + let data = ValidatorRegistrationData { + fee_recipient, + gas_limit: 0, + timestamp: 0, + pubkey, + }; + let domain = self.chain.spec.get_domain( + Epoch::new(0), + Domain::ApplicationMask(ApplicationDomain::Builder), + &fork, + Hash256::zero(), + ); + let message = data.signing_root(domain); + let signature = keypair.sk.sign(message); + + fee_recipients.push(fee_recipient); + registrations.push(SignedValidatorRegistrationData { + message: data, + signature, + }); + } + + self.client + .post_validator_register_validator(&registrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head() + .unwrap() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index as u64) + .await; + assert_eq!(actual, fee_recipient); + } + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -2973,6 +3041,14 @@ async fn get_validator_beacon_committee_subscriptions() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_validator() { + ApiTester::new() + .await + .test_post_validator_register_validator() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index ae091130f3f..69cd83db5ce 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -134,6 +134,7 @@ Typical Responses | 200 "DOMAIN_VOLUNTARY_EXIT": "0x04000000", "DOMAIN_SELECTION_PROOF": "0x05000000", "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", + "DOMAIN_APPLICATION_MASK": "0x00000001", "MAX_VALIDATORS_PER_COMMITTEE": "2048", "SLOTS_PER_EPOCH": "32", "EPOCHS_PER_ETH1_VOTING_PERIOD": "32", diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 3e965a2bf86..529bad1d852 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -929,6 +929,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST validator/register_validator` + pub async fn post_validator_register_validator( + &self, + registration_data: &[SignedValidatorRegistrationData], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("validator") + .push("register_validator"); + + self.post(path, ®istration_data).await?; + + Ok(()) + } + /// `GET config/fork_schedule` pub async fn get_config_fork_schedule(&self) -> Result>, Error> { let mut path = self.eth_path(V1)?; diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/application_domain.rs new file mode 100644 index 00000000000..5e33f2dfd50 --- /dev/null +++ b/consensus/types/src/application_domain.rs @@ -0,0 +1,16 @@ +/// This value is an application index of 0 with the bitmask applied (so it's equivalent to the bit mask). +/// Little endian hex: 0x00000001, Binary: 1000000000000000000000000 +pub const APPLICATION_DOMAIN_BUILDER: u32 = 16777216; + +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum ApplicationDomain { + Builder, +} + +impl ApplicationDomain { + pub fn get_domain_constant(&self) -> u32 { + match self { + ApplicationDomain::Builder => APPLICATION_DOMAIN_BUILDER, + } + } +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c283d4cb48c..8a69505a51f 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,3 +1,4 @@ +use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; use eth2_serde_utils::quoted_u64::MaybeQuoted; use int_to_bytes::int_to_bytes4; @@ -20,6 +21,7 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, + ApplicationMask(ApplicationDomain), } /// Lighthouse's internal configuration struct. @@ -159,6 +161,11 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub random_subnets_per_validator: u64, pub epochs_per_random_subnet_subscription: u64, + + /* + * Application params + */ + pub(crate) domain_application_mask: u32, } impl ChainSpec { @@ -326,6 +333,7 @@ impl ChainSpec { Domain::SyncCommittee => self.domain_sync_committee, Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, + Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), } } @@ -353,6 +361,17 @@ impl ChainSpec { self.compute_domain(Domain::Deposit, self.genesis_fork_version, Hash256::zero()) } + // This should be updated to include the current fork and the genesis validators root, but discussion is ongoing: + // + // https://github.com/ethereum/builder-specs/issues/14 + pub fn get_builder_domain(&self) -> Hash256 { + self.compute_domain( + Domain::ApplicationMask(ApplicationDomain::Builder), + self.genesis_fork_version, + Hash256::zero(), + ) + } + /// Return the 32-byte fork data root for the `current_version` and `genesis_validators_root`. /// /// This is used primarily in signature domains to avoid collisions across forks/chains. 
@@ -565,6 +584,11 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + + /* + * Application specific + */ + domain_application_mask: APPLICATION_DOMAIN_BUILDER, } } @@ -763,6 +787,11 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + + /* + * Application specific + */ + domain_application_mask: APPLICATION_DOMAIN_BUILDER, } } } @@ -1119,6 +1148,27 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); + + // The builder domain index is zero + let builder_domain_pre_mask = [0; 4]; + test_domain( + Domain::ApplicationMask(ApplicationDomain::Builder), + apply_bit_mask(builder_domain_pre_mask, &spec), + &spec, + ); + } + + fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { + let mut domain = [0; 4]; + let mask_bytes = int_to_bytes4(spec.domain_application_mask); + + // Apply application bit mask + for (i, (domain_byte, mask_byte)) in domain_bytes.iter().zip(mask_bytes.iter()).enumerate() + { + domain[i] = domain_byte | mask_byte; + } + + u32::from_le_bytes(domain) } // Test that `fork_name_at_epoch` and `fork_epoch` are consistent. diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f721e6c3bb7..8b3a753bd5d 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -69,6 +69,10 @@ impl ConfigAndPreset { "domain_aggregate_and_proof", u32_hex(spec.domain_aggregate_and_proof), ), + ( + "domain_application_mask", + u32_hex(spec.domain_application_mask), + ), ( "target_aggregators_per_committee", spec.target_aggregators_per_committee.to_string(), diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 22e429a58c2..ecfd77d7a4e 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -18,6 +18,7 @@ extern crate lazy_static; pub mod test_utils; pub mod aggregate_and_proof; +pub mod application_domain; pub mod attestation; pub mod attestation_data; pub mod attestation_duty; @@ -82,6 +83,7 @@ pub mod sync_committee_message; pub mod sync_selection_proof; pub mod sync_subnet_id; mod tree_hash_impls; +pub mod validator_registration_data; pub mod slot_data; #[cfg(feature = "sqlite")] @@ -157,6 +159,7 @@ pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; pub use crate::validator::Validator; +pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs new file mode 100644 index 00000000000..5a3450df081 --- /dev/null +++ b/consensus/types/src/validator_registration_data.rs @@ -0,0 +1,23 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use tree_hash_derive::TreeHash; + +/// Validator registration, for use in interacting with servers implementing the builder API. 
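+/// +/// The inner `message` carries the validator's preferred `fee_recipient` and `gas_limit`, plus a +/// `timestamp` in seconds since the UNIX epoch; `signature` is the validator's BLS signature over +/// `message` under the builder (application) domain rather than a fork-specific domain.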
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SignedValidatorRegistrationData { + pub message: ValidatorRegistrationData, + pub signature: Signature, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] +pub struct ValidatorRegistrationData { + pub fee_recipient: Address, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub timestamp: u64, + pub pubkey: PublicKeyBytes, +} + +impl SignedRoot for ValidatorRegistrationData {} diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 800f988654b..e39e6515fc9 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -67,6 +67,7 @@ mod tests { impl SignedObject for SyncSelectionProof {} impl SignedObject for SyncCommitteeMessage {} impl SignedObject for SignedContributionAndProof {} + impl SignedObject for SignedValidatorRegistrationData {} /// A file format used by Web3Signer to discover and unlock keystores. #[derive(Serialize)] @@ -448,6 +449,18 @@ mod tests { } } + //TODO: remove this once the consensys web3signer includes the `validator_registration` method + #[allow(dead_code)] + fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { + let fee_recipient = Address::repeat_byte(42); + ValidatorRegistrationData { + fee_recipient, + gas_limit: 30_000_000, + timestamp: 100, + pubkey, + } + } + /// Test all the "base" (phase 0) types. async fn test_base_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); @@ -499,6 +512,16 @@ mod tests { .await .unwrap() }) + //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method + // + // .await + // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { + // let val_reg_data = get_validator_registration(pubkey); + // validator_store + // .sign_validator_registration_data(val_reg_data) + // .await + // .unwrap() + // }) .await; } @@ -575,6 +598,16 @@ mod tests { .unwrap() }, ) + //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method + // + // .await + // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { + // let val_reg_data = get_validator_registration(pubkey); + // validator_store + // .sign_validator_registration_data(val_reg_data) + // .await + // .unwrap() + // }) .await; } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index f405f1a2b3c..836aab4c1fb 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -85,6 +85,11 @@ lazy_static::lazy_static! 
{ "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result = try_create_int_counter_vec( + "builder_validator_registrations_total", + "Total count of ValidatorRegistrationData signings", + &["status"] + ); pub static ref DUTIES_SERVICE_TIMES: Result = try_create_histogram_vec( "vc_duties_service_task_times_seconds", "Duration to perform duties service tasks", diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index ce35a00351f..5e45847598a 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -485,7 +485,10 @@ impl ProductionValidatorClient { self.preparation_service .clone() - .start_update_service(&self.context.eth2_config.spec) + .start_update_service( + self.config.private_tx_proposals, + &self.context.eth2_config.spec, + ) .map_err(|e| format!("Unable to start preparation service: {}", e))?; if let Some(doppelganger_service) = self.doppelganger_service.clone() { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index b4b6caa05db..34201180c01 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -3,17 +3,28 @@ use crate::{ fee_recipient_file::FeeRecipientFile, validator_store::{DoppelgangerStatus, ValidatorStore}, }; +use bls::PublicKeyBytes; use environment::RuntimeContext; +use parking_lot::RwLock; use slog::{debug, error, info}; use slot_clock::SlotClock; +use std::collections::HashMap; +use std::hash::Hash; use std::ops::Deref; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{sleep, Duration}; -use types::{Address, ChainSpec, EthSpec, ProposerPreparationData}; +use types::{ + Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, + ValidatorRegistrationData, +}; /// Number of epochs before the Bellatrix hard fork to begin posting proposer preparations. const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; +/// Number of epochs to wait before re-submitting validator registration. +const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; + /// Builds an `PreparationService`. pub struct PreparationServiceBuilder { validator_store: Option>>, @@ -83,6 +94,7 @@ impl PreparationServiceBuilder { .ok_or("Cannot build PreparationService without runtime_context")?, fee_recipient: self.fee_recipient, fee_recipient_file: self.fee_recipient_file, + validator_registration_cache: RwLock::new(HashMap::new()), }), }) } @@ -96,6 +108,32 @@ pub struct Inner { context: RuntimeContext, fee_recipient: Option
, fee_recipient_file: Option, + // Used to track unpublished validator registration changes. + validator_registration_cache: + RwLock>, +} + +#[derive(Hash, Eq, PartialEq, Debug, Clone)] +pub struct ValidatorRegistrationKey { + pub fee_recipient: Address, + pub gas_limit: u64, + pub pubkey: PublicKeyBytes, +} + +impl From for ValidatorRegistrationKey { + fn from(data: ValidatorRegistrationData) -> Self { + let ValidatorRegistrationData { + fee_recipient, + gas_limit, + timestamp: _, + pubkey, + } = data; + Self { + fee_recipient, + gas_limit, + pubkey, + } + } } /// Attempts to produce proposer preparations for all known validators at the beginning of each epoch. @@ -120,8 +158,19 @@ impl Deref for PreparationService { } impl PreparationService { + pub fn start_update_service( + self, + start_registration_service: bool, + spec: &ChainSpec, + ) -> Result<(), String> { + if start_registration_service { + self.clone().start_validator_registration_service(spec)?; + } + self.start_proposer_prepare_service(spec) + } + /// Starts the service which periodically produces proposer preparations. - pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + pub fn start_proposer_prepare_service(self, spec: &ChainSpec) -> Result<(), String> { let log = self.context.log().clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); @@ -163,6 +212,41 @@ impl PreparationService { Ok(()) } + /// Starts the service which periodically sends connected beacon nodes validator registration information. + pub fn start_validator_registration_service(self, spec: &ChainSpec) -> Result<(), String> { + let log = self.context.log().clone(); + + info!( + log, + "Validator registration service started"; + ); + + let spec = spec.clone(); + let slot_duration = Duration::from_secs(spec.seconds_per_slot); + + let executor = self.context.executor.clone(); + + let validator_registration_fut = async move { + loop { + // Poll the endpoint immediately to ensure fee recipients are received. + if let Err(e) = self.register_validators(&spec).await { + error!(log,"Error during validator registration";"error" => ?e); + } + + // Wait one slot if the register validator request fails or if we should not publish at the current slot. + if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { + sleep(duration_to_next_slot).await; + } else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + } + } + }; + executor.spawn(validator_registration_fut, "validator_registration_service"); + Ok(()) + } + /// Return `true` if the current slot is close to or past the Bellatrix fork epoch. /// /// This avoids spamming the BN with preparations before the Bellatrix fork epoch, which may @@ -188,6 +272,33 @@ impl PreparationService { } fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec { + self.collect_data(spec, |_, validator_index, fee_recipient| { + ProposerPreparationData { + validator_index, + fee_recipient, + } + }) + } + + fn collect_validator_registration_keys( + &self, + spec: &ChainSpec, + ) -> Vec { + self.collect_data(spec, |pubkey, _, fee_recipient| { + ValidatorRegistrationKey { + fee_recipient, + //TODO(sean) this is geth's default, we should make this configurable and maybe have the default be dynamic. 
+ // Discussion here: https://github.com/ethereum/builder-specs/issues/17 + gas_limit: 30_000_000, + pubkey, + } + }) + } + + fn collect_data(&self, spec: &ChainSpec, map_fn: G) -> Vec + where + G: Fn(PublicKeyBytes, u64, Address) -> U, + { let log = self.context.log(); let fee_recipient_file = self @@ -234,10 +345,7 @@ impl PreparationService { .or(self.fee_recipient); if let Some(fee_recipient) = fee_recipient { - Some(ProposerPreparationData { - validator_index, - fee_recipient, - }) + Some(map_fn(pubkey, validator_index, fee_recipient)) } else { if spec.bellatrix_fork_epoch.is_some() { error!( @@ -284,4 +392,116 @@ impl PreparationService { } Ok(()) } + + /// Register validators with builders, used in the blinded block proposal flow. + async fn register_validators(&self, spec: &ChainSpec) -> Result<(), String> { + let registration_keys = self.collect_validator_registration_keys(spec); + + let mut changed_keys = vec![]; + + // Need to scope this so the read lock is not held across an await point (I don't know why + // but the explicit `drop` is not enough). + { + let guard = self.validator_registration_cache.read(); + for key in registration_keys.iter() { + if !guard.contains_key(key) { + changed_keys.push(key.clone()); + } + } + drop(guard); + } + + // Check if any have changed or it's been `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION`. + if let Some(slot) = self.slot_clock.now() { + if slot % (E::slots_per_epoch() * EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION) == 0 { + self.publish_validator_registration_data(registration_keys) + .await?; + } else if !changed_keys.is_empty() { + self.publish_validator_registration_data(changed_keys) + .await?; + } + } + + Ok(()) + } + + async fn publish_validator_registration_data( + &self, + registration_keys: Vec, + ) -> Result<(), String> { + let log = self.context.log(); + + let registration_data_len = registration_keys.len(); + let mut signed = Vec::with_capacity(registration_data_len); + + for key in registration_keys { + let cached_registration_opt = + self.validator_registration_cache.read().get(&key).cloned(); + + let signed_data = if let Some(signed_data) = cached_registration_opt { + signed_data + } else { + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("{e:?}"))? 
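+ // Registration timestamps are whole seconds since the UNIX epoch.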
+ .as_secs(); + + let ValidatorRegistrationKey { + fee_recipient, + gas_limit, + pubkey, + } = key.clone(); + + let signed_data = match self + .validator_store + .sign_validator_registration_data(ValidatorRegistrationData { + fee_recipient, + gas_limit, + timestamp, + pubkey, + }) + .await + { + Ok(data) => data, + Err(e) => { + error!(log, "Unable to sign validator registration data"; "error" => ?e, "pubkey" => ?pubkey); + continue; + } + }; + + self.validator_registration_cache + .write() + .insert(key, signed_data.clone()); + + signed_data + }; + signed.push(signed_data); + } + + if !signed.is_empty() { + let signed_ref = signed.as_slice(); + + match self + .beacon_nodes + .first_success(RequireSynced::Yes, |beacon_node| async move { + beacon_node + .post_validator_register_validator(signed_ref) + .await + }) + .await + { + Ok(()) => debug!( + log, + "Published validator registration"; + "count" => registration_data_len, + ), + Err(e) => error!( + log, + "Unable to publish validator registration"; + "error" => %e, + ), + } + } + Ok(()) + } } diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 0daefc43c45..de69d990033 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -30,6 +30,7 @@ pub enum Error { ShuttingDown, TokioJoin(String), MergeForkNotSupported, + GenesisForkVersionRequired, } /// Enumerates all messages that can be signed by a validator. @@ -45,6 +46,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload), + ValidatorRegistration(&'a ValidatorRegistrationData), } impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { @@ -64,6 +66,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { beacon_block_root, .. } => beacon_block_root.signing_root(domain), SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), + SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), } } } @@ -129,6 +132,22 @@ impl SigningMethod { let signing_root = signable_message.signing_root(domain_hash); + let fork_info = Some(ForkInfo { + fork, + genesis_validators_root, + }); + + self.get_signature_from_root(signable_message, signing_root, executor, fork_info) + .await + } + + pub async fn get_signature_from_root>( + &self, + signable_message: SignableMessage<'_, T, Payload>, + signing_root: Hash256, + executor: &TaskExecutor, + fork_info: Option, + ) -> Result { match self { SigningMethod::LocalKeystore { voting_keypair, .. } => { let _timer = @@ -181,21 +200,21 @@ impl SigningMethod { SignableMessage::SignedContributionAndProof(c) => { Web3SignerObject::ContributionAndProof(c) } + SignableMessage::ValidatorRegistration(v) => { + Web3SignerObject::ValidatorRegistration(v) + } }; // Determine the Web3Signer message type. let message_type = object.message_type(); - // The `fork_info` field is not required for deposits since they sign across the - // genesis fork version. - let fork_info = if let Web3SignerObject::Deposit { .. } = &object { - None - } else { - Some(ForkInfo { - fork, - genesis_validators_root, - }) - }; + if matches!( + object, + Web3SignerObject::Deposit { .. 
} | Web3SignerObject::ValidatorRegistration(_) + ) && fork_info.is_some() + { + return Err(Error::GenesisForkVersionRequired); + } let request = SigningRequest { message_type, diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 9ac1655ccea..0ab37484ba7 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -17,6 +17,7 @@ pub enum MessageType { SyncCommitteeMessage, SyncCommitteeSelectionProof, SyncCommitteeContributionAndProof, + ValidatorRegistration, } #[derive(Debug, PartialEq, Copy, Clone, Serialize)] @@ -64,6 +65,7 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { }, SyncAggregatorSelectionData(&'a SyncAggregatorSelectionData), ContributionAndProof(&'a ContributionAndProof), + ValidatorRegistration(&'a ValidatorRegistrationData), } impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { @@ -93,6 +95,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { Web3SignerObject::ContributionAndProof(_) => { MessageType::SyncCommitteeContributionAndProof } + Web3SignerObject::ValidatorRegistration(_) => MessageType::ValidatorRegistration, } } } diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index b39ef9ef830..36ec5e89551 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -20,9 +20,9 @@ use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, Slot, - SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, + Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot, + SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; @@ -524,6 +524,35 @@ impl ValidatorStore { } } + pub async fn sign_validator_registration_data( + &self, + validator_registration_data: ValidatorRegistrationData, + ) -> Result { + let domain_hash = self.spec.get_builder_domain(); + let signing_root = validator_registration_data.signing_root(domain_hash); + + let signing_method = + self.doppelganger_bypassed_signing_method(validator_registration_data.pubkey)?; + let signature = signing_method + .get_signature_from_root::>( + SignableMessage::ValidatorRegistration(&validator_registration_data), + signing_root, + &self.task_executor, + None, + ) + .await?; + + metrics::inc_counter_vec( + &metrics::SIGNED_VALIDATOR_REGISTRATIONS_TOTAL, + &[metrics::SUCCESS], + ); + + Ok(SignedValidatorRegistrationData { + message: validator_registration_data, + signature, + }) + } + /// Signs an `AggregateAndProof` for a given validator. 
/// /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be From d40c76e667dcc2db66478d96c8ce76aafdbb747d Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 30 Jun 2022 22:51:49 +0000 Subject: [PATCH 07/15] Fix clippy lints for rust 1.62 (#3300) ## Issue Addressed Fixes some new clippy lints after the last rust release ### Lints fixed for the curious: - [cast_abs_to_unsigned](https://rust-lang.github.io/rust-clippy/master/index.html#cast_abs_to_unsigned) - [map_identity](https://rust-lang.github.io/rust-clippy/master/index.html#map_identity) - [let_unit_value](https://rust-lang.github.io/rust-clippy/master/index.html#let_unit_value) - [crate_in_macro_def](https://rust-lang.github.io/rust-clippy/master/index.html#crate_in_macro_def) - [extra_unused_lifetimes](https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes) - [format_push_string](https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string) --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 ++---- beacon_node/beacon_chain/src/block_verification.rs | 7 +++---- .../network/src/beacon_processor/worker/gossip_methods.rs | 4 ++-- .../network/src/subnet_service/attestation_subnets.rs | 2 +- beacon_node/src/config.rs | 4 +++- boot_node/src/lib.rs | 2 +- consensus/proto_array/src/fork_choice_test_definition.rs | 2 -- consensus/proto_array/src/proto_array.rs | 2 +- consensus/types/src/test_utils/macros.rs | 4 ++-- 9 files changed, 15 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d2b35727fc..568179a0626 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1079,12 +1079,10 @@ impl BeaconChain { } /// Apply a function to the canonical head without cloning it. - pub fn with_head( - &self, - f: impl FnOnce(&BeaconSnapshot) -> Result, - ) -> Result + pub fn with_head(&self, f: F) -> Result where E: From, + F: FnOnce(&BeaconSnapshot) -> Result, { let head_lock = self .canonical_head diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c791a35f689..a6cd98c253c 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1717,14 +1717,13 @@ fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = chain - .with_head(|head| { + let (fork, genesis_validators_root) = + chain.with_head::<_, BlockError, _>(|head| { Ok(( head.beacon_state.fork(), head.beacon_state.genesis_validators_root(), )) - }) - .map_err(|e: BlockError| e)?; + })?; if header.verify_signature::( &proposer_pubkey, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index aa01841106f..f014af4c555 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -45,7 +45,7 @@ struct VerifiedUnaggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. 
-impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregate { +impl VerifiedAttestation for VerifiedUnaggregate { fn attestation(&self) -> &Attestation { &self.attestation } @@ -72,7 +72,7 @@ struct VerifiedAggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. -impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregate { +impl VerifiedAttestation for VerifiedAggregate { fn attestation(&self) -> &Attestation { &self.signed_aggregate.message.aggregate } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 2b0fe6f55ab..475bd7f17d7 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -623,7 +623,7 @@ impl Stream for AttestationService { // process any known validator expiries match self.known_validators.poll_next_unpin(cx) { Poll::Ready(Some(Ok(_validator_index))) => { - let _ = self.handle_known_validator_expiry(); + self.handle_known_validator_expiry(); } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0421df34290..63cc9214ff0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -11,6 +11,7 @@ use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; use std::fmt::Debug; +use std::fmt::Write; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; @@ -784,7 +785,8 @@ pub fn set_network_config( None }) { - addr.push_str(&format!(":{}", enr_udp_port)); + write!(addr, ":{}", enr_udp_port) + .map_err(|e| format!("Failed to write enr address {}", e))?; } else { return Err( "enr-udp-port must be set for node to be discoverable with dns address" diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index f4391f987a9..3d9dada0fdb 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -50,7 +50,7 @@ pub fn run( let logger = Logger::root(drain.fuse(), o!()); let _scope_guard = slog_scope::set_global_logger(logger); - let _log_guard = slog_stdlog::init_with_level(debug_level).unwrap(); + slog_stdlog::init_with_level(debug_level).unwrap(); let log = slog_scope::logger(); // Run the main function emitting any errors diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 2980c019e82..2be46cc5903 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -105,7 +105,6 @@ impl ForkChoiceTestDefinition { Hash256::zero(), &spec, ) - .map_err(|e| e) .unwrap_or_else(|e| { panic!("find_head op at index {} returned error {}", op_index, e) }); @@ -132,7 +131,6 @@ impl ForkChoiceTestDefinition { proposer_boost_root, &spec, ) - .map_err(|e| e) .unwrap_or_else(|e| { panic!("find_head op at index {} returned error {}", op_index, e) }); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3f7909553b2..acdb42897aa 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -240,7 +240,7 @@ impl ProtoArray { // not exist. 
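+ // `unsigned_abs` is well-defined even for `i64::MIN`, where `abs() as u64` would overflow.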
node.weight = node .weight - .checked_sub(node_delta.abs() as u64) + .checked_sub(node_delta.unsigned_abs()) .ok_or(Error::DeltaOverflow(node_index))?; } else { node.weight = node diff --git a/consensus/types/src/test_utils/macros.rs b/consensus/types/src/test_utils/macros.rs index df449c712d6..1e275a5760e 100644 --- a/consensus/types/src/test_utils/macros.rs +++ b/consensus/types/src/test_utils/macros.rs @@ -13,8 +13,8 @@ macro_rules! ssz_tests { ($type: ty) => { #[test] pub fn test_ssz_round_trip() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use ssz::{ssz_encode, Decode}; + use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); let original = <$type>::random_for_test(&mut rng); @@ -33,8 +33,8 @@ macro_rules! tree_hash_tests { ($type: ty) => { #[test] pub fn test_tree_hash_root() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use tree_hash::TreeHash; + use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); let original = <$type>::random_for_test(&mut rng); From a7da0677d5db0bfa747f4a1d8d6135218a0983bf Mon Sep 17 00:00:00 2001 From: realbigsean Date: Fri, 1 Jul 2022 01:15:19 +0000 Subject: [PATCH 08/15] Remove builder redundancy (#3294) ## Issue Addressed This PR is a subset of the changes in #3134. Unstable will still not function correctly with the new builder spec once this is merged, #3134 should be used on testnets ## Proposed Changes - Removes redundancy in "builders" (servers implementing the builder spec) - Renames `payload-builder` flag to `builder` - Moves from old builder RPC API to new HTTP API, but does not implement the validator registration API (implemented in https://github.com/sigp/lighthouse/pull/3194) Co-authored-by: sean Co-authored-by: realbigsean --- Cargo.lock | 35 ++ Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 9 +- beacon_node/beacon_chain/src/builder.rs | 4 +- .../beacon_chain/src/execution_payload.rs | 11 +- beacon_node/beacon_chain/src/test_utils.rs | 3 +- .../tests/payload_invalidation.rs | 2 +- beacon_node/builder_client/Cargo.toml | 12 + beacon_node/builder_client/src/lib.rs | 192 ++++++++++ beacon_node/client/src/builder.rs | 2 +- beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 32 +- .../execution_layer/src/engine_api/http.rs | 65 +--- beacon_node/execution_layer/src/engines.rs | 106 +---- beacon_node/execution_layer/src/lib.rs | 361 ++++++++++-------- beacon_node/execution_layer/src/metrics.rs | 1 + .../src/test_utils/mock_execution_layer.rs | 11 +- beacon_node/src/cli.rs | 5 +- beacon_node/src/config.rs | 6 +- common/eth2/src/lib.rs | 2 +- consensus/types/Cargo.toml | 1 + consensus/types/src/builder_bid.rs | 52 +++ consensus/types/src/lib.rs | 1 + lighthouse/tests/beacon_node.rs | 3 +- .../src/test_rig.rs | 20 +- 25 files changed, 564 insertions(+), 374 deletions(-) create mode 100644 beacon_node/builder_client/Cargo.toml create mode 100644 beacon_node/builder_client/src/lib.rs create mode 100644 consensus/types/src/builder_bid.rs diff --git a/Cargo.lock b/Cargo.lock index 3bdce9138ef..1e9b5b42394 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -537,6 +537,17 @@ dependencies = [ "safemem", ] +[[package]] +name = "builder_client" +version = "0.1.0" +dependencies = [ + "eth2", + "reqwest", + "sensitive_url", + "serde", + "serde_json", +] + [[package]] name = "bumpalo" version = "3.10.0" @@ -1876,6 +1887,7 @@ name = "execution_layer" version = 
"0.1.0" dependencies = [ "async-trait", + "builder_client", "bytes", "environment", "eth2", @@ -5505,6 +5517,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +dependencies = [ + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_yaml" version = "0.8.24" @@ -6655,6 +6689,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_with", "serde_yaml", "slog", "smallvec", diff --git a/Cargo.toml b/Cargo.toml index c79859d0a78..819f92d99ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "beacon_node", "beacon_node/beacon_chain", + "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", "beacon_node/lighthouse_network", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 568179a0626..a64c9718755 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -334,7 +334,7 @@ pub struct BeaconChain { /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. - pub execution_layer: Option, + pub execution_layer: Option>, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. @@ -3216,6 +3216,11 @@ impl BeaconChain { let slot = state.slot(); let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + let pubkey_opt = state + .validators() + .get(proposer_index as usize) + .map(|v| v.pubkey); + // Closure to fetch a sync aggregate in cases where it is required. let get_sync_aggregate = || -> Result, BlockProductionError> { Ok(self @@ -3274,7 +3279,7 @@ impl BeaconChain { BeaconState::Merge(_) => { let sync_aggregate = get_sync_aggregate()?; let execution_payload = - get_execution_payload::(self, &state, proposer_index)?; + get_execution_payload::(self, &state, proposer_index, pubkey_opt)?; BeaconBlock::Merge(BeaconBlockMerge { slot, proposer_index, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 361246b4d38..87f94161585 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -77,7 +77,7 @@ pub struct BeaconChainBuilder { >, op_pool: Option>, eth1_chain: Option>, - execution_layer: Option, + execution_layer: Option>, event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, @@ -481,7 +481,7 @@ where } /// Sets the `BeaconChain` execution layer. 
- pub fn execution_layer(mut self, execution_layer: Option) -> Self { + pub fn execution_layer(mut self, execution_layer: Option>) -> Self { self.execution_layer = execution_layer; self } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 08e4cd41efd..7085fc6500f 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -247,9 +247,10 @@ pub fn get_execution_payload, state: &BeaconState, proposer_index: u64, + pubkey: Option, ) -> Result { Ok( - prepare_execution_payload_blocking::(chain, state, proposer_index)? + prepare_execution_payload_blocking::(chain, state, proposer_index, pubkey)? .unwrap_or_default(), ) } @@ -259,6 +260,7 @@ pub fn prepare_execution_payload_blocking, state: &BeaconState, proposer_index: u64, + pubkey: Option, ) -> Result, BlockProductionError> { let execution_layer = chain .execution_layer @@ -267,7 +269,7 @@ pub fn prepare_execution_payload_blocking(chain, state, proposer_index).await + prepare_execution_payload::(chain, state, proposer_index, pubkey).await }) .map_err(BlockProductionError::BlockingFailed)? } @@ -290,6 +292,7 @@ pub async fn prepare_execution_payload, state: &BeaconState, proposer_index: u64, + pubkey: Option, ) -> Result, BlockProductionError> { let spec = &chain.spec; let execution_layer = chain @@ -345,12 +348,14 @@ pub async fn prepare_execution_payload( + .get_payload::( parent_hash, timestamp, random, finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, + pubkey, + state.slot(), ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 69ed413fd40..980de25cf3c 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -147,7 +147,7 @@ pub struct Builder { store: Option>>, initial_mutator: Option>, store_mutator: Option>, - execution_layer: Option, + execution_layer: Option>, mock_execution_layer: Option>, runtime: TestRuntime, log: Logger, @@ -361,6 +361,7 @@ where DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + None, ); self.execution_layer = Some(mock.el.clone()); self.mock_execution_layer = Some(mock); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1aa9844a351..2a48a4b6911 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -64,7 +64,7 @@ impl InvalidPayloadRig { self } - fn execution_layer(&self) -> ExecutionLayer { + fn execution_layer(&self) -> ExecutionLayer { self.harness.chain.execution_layer.clone().unwrap() } diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml new file mode 100644 index 00000000000..c4d21c59ab8 --- /dev/null +++ b/beacon_node/builder_client/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "builder_client" +version = "0.1.0" +edition = "2021" +authors = ["Sean Anderson "] + +[dependencies] +reqwest = { version = "0.11.0", features = ["json","stream"] } +sensitive_url = { path = "../../common/sensitive_url" } +eth2 = { path = "../../common/eth2" } +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" \ No newline at end of file diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs new file mode 
100644 index 00000000000..500f5aa9ffe --- /dev/null +++ b/beacon_node/builder_client/src/lib.rs @@ -0,0 +1,192 @@ +use eth2::ok_or_error; +use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::{ + BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, + ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, + Slot, +}; +pub use eth2::Error; +use reqwest::{IntoUrl, Response}; +use sensitive_url::SensitiveUrl; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::time::Duration; + +pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 500; + +#[derive(Clone)] +pub struct Timeouts { + get_header: Duration, +} + +impl Default for Timeouts { + fn default() -> Self { + Self { + get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + } + } +} + +#[derive(Clone)] +pub struct BuilderHttpClient { + client: reqwest::Client, + server: SensitiveUrl, + timeouts: Timeouts, +} + +impl BuilderHttpClient { + pub fn new(server: SensitiveUrl) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + timeouts: Timeouts::default(), + }) + } + + pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + timeouts, + }) + } + + async fn get(&self, url: U) -> Result { + self.get_response_with_timeout(url, None) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + + async fn get_with_timeout( + &self, + url: U, + timeout: Duration, + ) -> Result { + self.get_response_with_timeout(url, Some(timeout)) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + + /// Perform a HTTP GET request, returning the `Response` for further processing. + async fn get_response_with_timeout( + &self, + url: U, + timeout: Option, + ) -> Result { + let mut builder = self.client.get(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + ok_or_error(response).await + } + + /// Generic POST function supporting arbitrary responses and timeouts. + async fn post_generic( + &self, + url: U, + body: &T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await?; + ok_or_error(response).await + } + + async fn post_with_raw_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self + .client + .post(url) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + ok_or_error(response).await + } + + /// `POST /eth/v1/builder/validators` + pub async fn post_builder_validators( + &self, + validator: &[SignedValidatorRegistrationData], + ) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("validators"); + + self.post_generic(path, &validator, None).await?; + Ok(()) + } + + /// `POST /eth/v1/builder/blinded_blocks` + pub async fn post_builder_blinded_blocks( + &self, + blinded_block: &SignedBeaconBlock>, + ) -> Result>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("blinded_blocks"); + + Ok(self + .post_with_raw_response(path, &blinded_block) + .await? + .json() + .await?) 
+ } + + /// `GET /eth/v1/builder/header` + pub async fn get_builder_header>( + &self, + slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: &PublicKeyBytes, + ) -> Result>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("header") + .push(slot.to_string().as_str()) + .push(format!("{parent_hash:?}").as_str()) + .push(pubkey.as_hex_string().as_str()); + + self.get_with_timeout(path, self.timeouts.get_header).await + } + + /// `GET /eth/v1/builder/status` + pub async fn get_builder_status(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("status"); + + self.get(path).await + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index a6124bdfadc..95ba1b56578 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -724,7 +724,7 @@ where execution_layer.spawn_watchdog_routine(beacon_chain.slot_clock.clone()); // Spawn a routine that removes expired proposer preparations. - execution_layer.spawn_clean_proposer_caches_routine::( + execution_layer.spawn_clean_proposer_caches_routine::( beacon_chain.slot_clock.clone(), ); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index dbd63246803..c181c190509 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -38,3 +38,4 @@ zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +builder_client = { path = "../builder_client" } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 5f3edb78bfd..a1e769e3e35 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,11 +1,9 @@ use crate::engines::ForkChoiceState; -use async_trait::async_trait; pub use ethers_core::types::Transaction; use http::deposit_methods::RpcError; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; -use slog::Logger; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, FixedVector, Hash256, Uint256, VariableList, @@ -28,10 +26,7 @@ pub enum Error { InvalidExecutePayloadResponse(&'static str), JsonRpc(RpcError), Json(serde_json::Error), - ServerMessage { - code: i64, - message: String, - }, + ServerMessage { code: i64, message: String }, Eip155Failure, IsSyncing, ExecutionBlockNotFound(ExecutionBlockHash), @@ -40,15 +35,9 @@ pub enum Error { PayloadIdUnavailable, TransitionConfigurationMismatch, PayloadConversionLogicFlaw, - InvalidBuilderQuery, - MissingPayloadId { - parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, - }, DeserializeTransaction(ssz_types::Error), DeserializeTransactions(ssz_types::Error), + BuilderApi(builder_client::Error), } impl From for Error { @@ -76,19 +65,14 @@ impl From for Error { } } -pub struct EngineApi; -pub struct BuilderApi; - -#[async_trait] -pub trait Builder { - async fn notify_forkchoice_updated( - 
&self, - forkchoice_state: ForkChoiceState, - payload_attributes: Option, - log: &Logger, - ) -> Result; +impl From for Error { + fn from(e: builder_client::Error) -> Self { + Error::BuilderApi(e) + } } +pub struct EngineApi; + #[derive(Clone, Copy, Debug, PartialEq)] pub enum PayloadStatusV1Status { Valid, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 157f9a3054d..832771460e5 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -10,7 +10,7 @@ use serde_json::json; use std::marker::PhantomData; use std::time::Duration; -use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock}; +use types::EthSpec; pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; @@ -43,12 +43,6 @@ pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_millis(500); -pub const BUILDER_GET_PAYLOAD_HEADER_V1: &str = "builder_getPayloadHeaderV1"; -pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2); - -pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1"; -pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2); - /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; @@ -714,63 +708,6 @@ impl HttpJsonRpc { } } -impl HttpJsonRpc { - pub async fn get_payload_header_v1( - &self, - payload_id: PayloadId, - ) -> Result, Error> { - let params = json!([JsonPayloadIdRequest::from(payload_id)]); - - let response: JsonExecutionPayloadHeaderV1 = self - .rpc_request( - BUILDER_GET_PAYLOAD_HEADER_V1, - params, - BUILDER_GET_PAYLOAD_HEADER_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } - - pub async fn forkchoice_updated_v1( - &self, - forkchoice_state: ForkChoiceState, - payload_attributes: Option, - ) -> Result { - let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), - payload_attributes.map(JsonPayloadAttributesV1::from) - ]); - - let response: JsonForkchoiceUpdatedV1Response = self - .rpc_request( - ENGINE_FORKCHOICE_UPDATED_V1, - params, - ENGINE_FORKCHOICE_UPDATED_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } - - pub async fn propose_blinded_block_v1( - &self, - block: SignedBeaconBlock>, - ) -> Result, Error> { - let params = json!([block]); - - let response: JsonExecutionPayloadV1 = self - .rpc_request( - BUILDER_PROPOSE_BLINDED_BLOCK_V1, - params, - BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } -} - #[cfg(test)] mod test { use super::auth::JwtKey; diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index d3c4d0e421a..88c94162f82 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,12 +1,9 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. 
use crate::engine_api::{ - Builder, EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, - PayloadId, + EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, }; -use crate::{BuilderApi, HttpJsonRpc}; -use async_trait::async_trait; -use futures::future::join_all; +use crate::HttpJsonRpc; use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; use std::future::Future; @@ -97,9 +94,8 @@ impl Engine { } } -#[async_trait] -impl Builder for Engine { - async fn notify_forkchoice_updated( +impl Engine { + pub async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, payload_attributes: Option, @@ -128,34 +124,6 @@ impl Builder for Engine { } } -#[async_trait] -impl Builder for Engine { - async fn notify_forkchoice_updated( - &self, - forkchoice_state: ForkChoiceState, - pa: Option, - log: &Logger, - ) -> Result { - let payload_attributes = pa.ok_or(EngineApiError::InvalidBuilderQuery)?; - let response = self - .api - .forkchoice_updated_v1(forkchoice_state, Some(payload_attributes)) - .await?; - - if let Some(payload_id) = response.payload_id { - let key = PayloadIdCacheKey::new(&forkchoice_state, &payload_attributes); - self.payload_id_cache.lock().await.put(key, payload_id); - } else { - warn!( - log, - "Builder should have returned a payload_id for attributes {:?}", payload_attributes - ); - } - - Ok(response) - } -} - // This structure used to hold multiple execution engines managed in a fallback manner. This // functionality has been removed following https://github.com/sigp/lighthouse/issues/3118 and this // struct will likely be removed in the future. @@ -165,15 +133,11 @@ pub struct Engines { pub log: Logger, } -pub struct Builders { - pub builders: Vec>, - pub log: Logger, -} - #[derive(Debug)] pub enum EngineError { Offline { id: String }, Api { id: String, error: EngineApiError }, + BuilderApi { error: EngineApiError }, Auth { id: String }, } @@ -422,66 +386,6 @@ impl Engines { } } -impl Builders { - pub async fn first_success_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Result> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let mut errors = vec![]; - - for builder in &self.builders { - match func(builder).await { - Ok(result) => return Ok(result), - Err(error) => { - debug!( - self.log, - "Builder call failed"; - "error" => ?error, - "id" => &builder.id - ); - errors.push(EngineError::Api { - id: builder.id.clone(), - error, - }) - } - } - } - - Err(errors) - } - - pub async fn broadcast_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Vec> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let func = &func; - let futures = self.builders.iter().map(|engine| async move { - func(engine).await.map_err(|error| { - debug!( - self.log, - "Builder call failed"; - "error" => ?error, - "id" => &engine.id - ); - EngineError::Api { - id: engine.id.clone(), - error, - } - }) - }); - - join_all(futures).await - } -} - impl PayloadIdCacheKey { fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { Self { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 4b298876756..156382c4812 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,9 +4,8 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
-use crate::engine_api::Builder; -use crate::engines::Builders; use auth::{strip_prefix, Auth, JwtKey}; +use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; @@ -20,7 +19,6 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; -use std::convert::TryInto; use std::future::Future; use std::io::Write; use std::path::PathBuf; @@ -33,7 +31,7 @@ use tokio::{ }; use types::{ BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, - ProposerPreparationData, SignedBeaconBlock, Slot, + ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; mod engine_api; @@ -69,6 +67,7 @@ pub enum Error { NoEngines, NoPayloadBuilder, ApiError(ApiError), + Builder(builder_client::Error), EngineErrors(Vec), NotSynced, ShuttingDown, @@ -102,15 +101,16 @@ pub struct Proposer { payload_attributes: PayloadAttributes, } -struct Inner { +struct Inner { engines: Engines, - builders: Builders, + builder: Option, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, execution_blocks: Mutex>, proposers: RwLock>, executor: TaskExecutor, + phantom: std::marker::PhantomData, log: Logger, } @@ -119,7 +119,7 @@ pub struct Config { /// Endpoint urls for EL nodes that are running the engine api. pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. - pub builder_endpoints: Vec, + pub builder_url: Option, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -143,16 +143,16 @@ pub struct Config { /// /// The fallback nodes have an ordering. The first supplied will be the first contacted, and so on. #[derive(Clone)] -pub struct ExecutionLayer { - inner: Arc, +pub struct ExecutionLayer { + inner: Arc>, } -impl ExecutionLayer { +impl ExecutionLayer { /// Instantiate `Self` with Execution engines specified using `Config`, all using the JSON-RPC via HTTP. pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, - builder_endpoints: builder_urls, + builder_url, secret_files, suggested_fee_recipient, jwt_id, @@ -208,14 +208,9 @@ impl ExecutionLayer { Engine::::new(id, api) }; - let builders: Vec> = builder_urls - .into_iter() - .map(|url| { - let id = url.to_string(); - let api = HttpJsonRpc::::new(url)?; - Ok(Engine::::new(id, api)) - }) - .collect::>()?; + let builder = builder_url + .map(|url| BuilderHttpClient::new(url).map_err(Error::Builder)) + .transpose()?; let inner = Inner { engines: Engines { @@ -223,16 +218,14 @@ impl ExecutionLayer { latest_forkchoice_state: <_>::default(), log: log.clone(), }, - builders: Builders { - builders, - log: log.clone(), - }, + builder, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, proposer_preparation_data: Mutex::new(HashMap::new()), proposers: RwLock::new(HashMap::new()), execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, + phantom: std::marker::PhantomData, log, }; @@ -242,13 +235,13 @@ impl ExecutionLayer { } } -impl ExecutionLayer { +impl ExecutionLayer { fn engines(&self) -> &Engines { &self.inner.engines } - fn builders(&self) -> &Builders { - &self.inner.builders + pub fn builder(&self) -> &Option { + &self.inner.builder } pub fn executor(&self) -> &TaskExecutor { @@ -282,9 +275,9 @@ impl ExecutionLayer { } /// Convenience function to allow calling async functions in a non-async context. - pub fn block_on<'a, T, U, V>(&'a self, generate_future: T) -> Result + pub fn block_on<'a, F, U, V>(&'a self, generate_future: F) -> Result where - T: Fn(&'a Self) -> U, + F: Fn(&'a Self) -> U, U: Future>, { let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; @@ -296,9 +289,9 @@ impl ExecutionLayer { /// /// The function is "generic" since it does not enforce a particular return type on /// `generate_future`. - pub fn block_on_generic<'a, T, U, V>(&'a self, generate_future: T) -> Result + pub fn block_on_generic<'a, F, U, V>(&'a self, generate_future: F) -> Result where - T: Fn(&'a Self) -> U, + F: Fn(&'a Self) -> U, U: Future, { let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; @@ -307,9 +300,9 @@ impl ExecutionLayer { } /// Convenience function to allow spawning a task without waiting for the result. 
- pub fn spawn(&self, generate_future: T, name: &'static str) + pub fn spawn(&self, generate_future: F, name: &'static str) where - T: FnOnce(Self) -> U, + F: FnOnce(Self) -> U, U: Future + Send + 'static, { self.executor().spawn(generate_future(self.clone()), name); @@ -317,12 +310,12 @@ impl ExecutionLayer { /// Spawns a routine which attempts to keep the execution engines online. pub fn spawn_watchdog_routine(&self, slot_clock: S) { - let watchdog = |el: ExecutionLayer| async move { + let watchdog = |el: ExecutionLayer| async move { // Run one task immediately. el.watchdog_task().await; let recurring_task = - |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { + |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { // We run the task three times per slot. // // The interval between each task is 1/3rd of the slot duration. This matches nicely @@ -377,11 +370,8 @@ impl ExecutionLayer { } /// Spawns a routine which cleans the cached proposer data periodically. - pub fn spawn_clean_proposer_caches_routine( - &self, - slot_clock: S, - ) { - let preparation_cleaner = |el: ExecutionLayer| async move { + pub fn spawn_clean_proposer_caches_routine(&self, slot_clock: S) { + let preparation_cleaner = |el: ExecutionLayer| async move { // Start the loop to periodically clean proposer preparation cache. loop { if let Some(duration_to_next_epoch) = @@ -395,7 +385,7 @@ impl ExecutionLayer { .map(|slot| slot.epoch(T::slots_per_epoch())) { Some(current_epoch) => el - .clean_proposer_caches::(current_epoch) + .clean_proposer_caches(current_epoch) .await .map_err(|e| { error!( @@ -420,7 +410,7 @@ impl ExecutionLayer { /// Spawns a routine that polls the `exchange_transition_configuration` endpoint. pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { - let routine = |el: ExecutionLayer| async move { + let routine = |el: ExecutionLayer| async move { loop { if let Err(e) = el.exchange_transition_configuration(&spec).await { error!( @@ -454,7 +444,7 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - async fn update_proposer_preparation( + pub async fn update_proposer_preparation( &self, update_epoch: Epoch, preparation_data: &[ProposerPreparationData], @@ -476,7 +466,7 @@ impl ExecutionLayer { } /// Removes expired entries from proposer_preparation_data and proposers caches - async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { + async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { let mut proposer_preparation_data = self.proposer_preparation_data().await; // Keep all entries that have been updated in the last 2 epochs @@ -561,104 +551,164 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
- pub async fn get_payload>( + #[allow(clippy::too_many_arguments)] + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, finalized_block_hash: ExecutionBlockHash, proposer_index: u64, + pubkey: Option, + slot: Slot, ) -> Result { - let _timer = metrics::start_timer_vec( - &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::GET_PAYLOAD], - ); - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; match Payload::block_type() { BlockType::Blinded => { - debug!( - self.log(), - "Issuing builder_getPayloadHeader"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_BLINDED_PAYLOAD], ); - self.builders() - .first_success_without_retry(|engine| async move { - let payload_id = engine - .get_payload_id( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - ) - .await - .ok_or(ApiError::MissingPayloadId { - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - })?; - engine - .api - .get_payload_header_v1::(payload_id) - .await? - .try_into() - .map_err(|_| ApiError::PayloadConversionLogicFlaw) - }) - .await - .map_err(Error::EngineErrors) + self.get_blinded_payload( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + pubkey, + slot, + ) + .await } BlockType::Full => { - debug!( + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_PAYLOAD], + ); + self.get_full_payload( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + ) + .await + } + } + } + + #[allow(clippy::too_many_arguments)] + async fn get_blinded_payload>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + finalized_block_hash: ExecutionBlockHash, + suggested_fee_recipient: Address, + pubkey_opt: Option, + slot: Slot, + ) -> Result { + //FIXME(sean) fallback logic included in PR #3134 + + // Don't attempt to outsource payload construction until after the merge transition has been + // finalized. We want to be conservative with payload construction until then. + if let (Some(builder), Some(pubkey)) = (self.builder(), pubkey_opt) { + if finalized_block_hash != ExecutionBlockHash::zero() { + info!( self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, "parent_hash" => ?parent_hash, ); - self.engines() - .first_success(|engine| async move { - let payload_id = if let Some(id) = engine - .get_payload_id( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - ) - .await - { - // The payload id has been cached for this engine. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::HIT], - ); - id - } else { - // The payload id has *not* been cached for this engine. Trigger an artificial - // fork choice update to retrieve a payload ID. - // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. 
- metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::MISS], - ); - let fork_choice_state = ForkChoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, - }; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - }; + return builder + .get_builder_header::(slot, parent_hash, &pubkey) + .await + .map(|d| d.data.message.header) + .map_err(Error::Builder); + } + } + self.get_full_payload::( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + ) + .await + } + + /// Get a full payload without caching its result in the execution layer's payload cache. + async fn get_full_payload>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + finalized_block_hash: ExecutionBlockHash, + suggested_fee_recipient: Address, + ) -> Result { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + finalized_block_hash, + suggested_fee_recipient, + noop, + ) + .await + } + + async fn get_full_payload_with>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + finalized_block_hash: ExecutionBlockHash, + suggested_fee_recipient: Address, + f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, + ) -> Result { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engines() + .first_success(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .await + { + // The payload id has been cached for this engine. + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::HIT], + ); + id + } else { + // The payload id has *not* been cached for this engine. Trigger an artificial + // fork choice update to retrieve a payload ID. + // + // TODO(merge): a better algorithm might try to favour a node that already had a + // cached payload id, since a payload that has had more time to produce is + // likely to be more profitable. + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::MISS], + ); + let fork_choice_state = ForkChoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash, + }; + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient, + }; let response = engine .notify_forkchoice_updated( @@ -684,16 +734,19 @@ impl ExecutionLayer { } }; - engine - .api - .get_payload_v1::(payload_id) - .await - .map(Into::into) - }) + engine + .api + .get_payload_v1::(payload_id) .await - .map_err(Error::EngineErrors) - } - } + .map(|full_payload| { + if f(self, &full_payload).is_some() { + warn!(self.log(), "Duplicate payload cached, this might indicate redundant proposal attempts."); + } + full_payload.into() + }) + }) + .await + .map_err(Error::EngineErrors) } /// Maps to the `engine_newPayload` JSON-RPC call. @@ -709,7 +762,7 @@ impl ExecutionLayer { /// - Invalid, if any nodes return invalid. /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. 
- pub async fn notify_new_payload( + pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, ) -> Result { @@ -872,23 +925,10 @@ impl ExecutionLayer { }) .await; - // Only query builders with payload attributes populated. - let builder_broadcast_results = if payload_attributes.is_some() { - self.builders() - .broadcast_without_retry(|engine| async move { - engine - .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) - .await - }) - .await - } else { - vec![] - }; process_multiple_payload_statuses( head_block_hash, Some(broadcast_results) .into_iter() - .chain(builder_broadcast_results.into_iter()) .map(|result| result.map(|response| response.payload_status)), self.log(), ) @@ -1147,7 +1187,7 @@ impl ExecutionLayer { } } - pub async fn get_payload_by_block_hash( + pub async fn get_payload_by_block_hash( &self, hash: ExecutionBlockHash, ) -> Result>, Error> { @@ -1160,7 +1200,7 @@ impl ExecutionLayer { .map_err(Error::EngineErrors) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_block_hash_from_engine( &self, engine: &Engine, hash: ExecutionBlockHash, @@ -1205,21 +1245,24 @@ impl ExecutionLayer { })) } - pub async fn propose_blinded_beacon_block( + pub async fn propose_blinded_beacon_block( &self, block: &SignedBeaconBlock>, ) -> Result, Error> { debug!( self.log(), - "Issuing builder_proposeBlindedBlock"; + "Sending block to builder"; "root" => ?block.canonical_root(), ); - self.builders() - .first_success_without_retry(|engine| async move { - engine.api.propose_blinded_block_v1(block.clone()).await - }) - .await - .map_err(Error::EngineErrors) + if let Some(builder) = self.builder() { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + } else { + Err(Error::NoPayloadBuilder) + } } } @@ -1320,3 +1363,7 @@ mod test { .await; } } + +fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { + None +} diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 356c5a46dd9..e28a81fd878 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -3,6 +3,7 @@ pub use lighthouse_metrics::*; pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; pub const GET_PAYLOAD: &str = "get_payload"; +pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 5770a8a3821..707a7c0c3e7 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -9,7 +9,7 @@ use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer { pub server: MockServer, - pub el: ExecutionLayer, + pub el: ExecutionLayer, pub executor: TaskExecutor, pub spec: ChainSpec, } @@ -22,6 +22,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + None, ) } @@ -31,6 +32,7 @@ impl MockExecutionLayer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + builder_url: Option, ) -> Self { let handle = executor.handle().unwrap(); @@ -54,6 +56,7 @@ 
impl MockExecutionLayer { let config = Config { execution_endpoints: vec![url], + builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), ..Default::default() @@ -111,12 +114,14 @@ impl MockExecutionLayer { let validator_index = 0; let payload = self .el - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, finalized_block_hash, validator_index, + None, + slot, ) .await .unwrap() @@ -173,7 +178,7 @@ impl MockExecutionLayer { pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self where - U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + U: Fn(ChainSpec, ExecutionLayer, Option) -> V, V: Future, { let terminal_block_number = self diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index a0cc124d476..964873a9490 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -466,8 +466,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) ) .arg( - Arg::with_name("payload-builder") - .long("payload-builder") + Arg::with_name("builder") + .long("builder") + .alias("payload-builder") .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") .requires("execution-endpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 63cc9214ff0..c91bd711e50 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -288,10 +288,10 @@ pub fn get_config( parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; // Parse and set the payload builder, if any. - if let Some(endpoints) = cli_args.value_of("payload-builder") { + if let Some(endpoint) = cli_args.value_of("builder") { let payload_builder = - parse_only_one_value(endpoints, SensitiveUrl::parse, "--payload-builder", log)?; - el_config.builder_endpoints = vec![payload_builder]; + parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; + el_config.builder_url = Some(payload_builder); } // Set config values from parse values. diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 529bad1d852..d3741013088 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1508,7 +1508,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an /// appropriate error message. 
-async fn ok_or_error(response: Response) -> Result { +pub async fn ok_or_error(response: Response) -> Result { let status = response.status(); if status == StatusCode::OK { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 881d17a3309..96018230f0d 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -46,6 +46,7 @@ itertools = "0.10.0" superstruct = "0.5.0" serde_json = "1.0.74" smallvec = "1.8.0" +serde_with = "1.13.0" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs new file mode 100644 index 00000000000..1726f2ad077 --- /dev/null +++ b/consensus/types/src/builder_bid.rs @@ -0,0 +1,52 @@ +use crate::{EthSpec, ExecPayload, ExecutionPayloadHeader, Uint256}; +use bls::blst_implementations::PublicKeyBytes; +use bls::Signature; +use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use serde_with::{serde_as, DeserializeAs, SerializeAs}; +use std::marker::PhantomData; + +#[serde_as] +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec, Payload: ExecPayload")] +pub struct BuilderBid> { + #[serde_as(as = "BlindedPayloadAsHeader")] + pub header: Payload, + #[serde(with = "eth2_serde_utils::quoted_u256")] + pub value: Uint256, + pub pubkey: PublicKeyBytes, + #[serde(skip)] + _phantom_data: PhantomData, +} + +/// Validator registration, for use in interacting with servers implementing the builder API. +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec, Payload: ExecPayload")] +pub struct SignedBuilderBid> { + pub message: BuilderBid, + pub signature: Signature, +} + +struct BlindedPayloadAsHeader(PhantomData); + +impl> SerializeAs for BlindedPayloadAsHeader { + fn serialize_as(source: &Payload, serializer: S) -> Result + where + S: Serializer, + { + source.to_execution_payload_header().serialize(serializer) + } +} + +impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> + for BlindedPayloadAsHeader +{ + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let payload_header = ExecutionPayloadHeader::deserialize(deserializer)?; + Payload::try_from(payload_header) + .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index ecfd77d7a4e..7823ec223c0 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -28,6 +28,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; pub mod consts; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 443c442027e..a9f8900d0cf 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -411,12 +411,13 @@ fn run_payload_builder_flag_test(flag: &str, builders: &str) { let config = config.execution_layer.as_ref().unwrap(); // Only first provided endpoint is parsed as we don't support // redundancy. 
- assert_eq!(&config.builder_endpoints, &all_builders[..1]); + assert_eq!(config.builder_url, all_builders.get(0).cloned()); }); } #[test] fn payload_builder_flags() { + run_payload_builder_flag_test("builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 21162fea56d..a5bab4ed781 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -11,9 +11,9 @@ use types::{ const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); -struct ExecutionPair { +struct ExecutionPair { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. - execution_layer: ExecutionLayer, + execution_layer: ExecutionLayer, /// A handle to external EE process, once this is dropped the process will be killed. #[allow(dead_code)] execution_engine: ExecutionEngine, @@ -23,11 +23,11 @@ struct ExecutionPair { /// /// There are two EEs held here so that we can test out-of-order application of payloads, and other /// edge-cases. -pub struct TestRig { +pub struct TestRig { #[allow(dead_code)] runtime: Arc, - ee_a: ExecutionPair, - ee_b: ExecutionPair, + ee_a: ExecutionPair, + ee_b: ExecutionPair, spec: ChainSpec, _runtime_shutdown: exit_future::Signal, } @@ -172,12 +172,14 @@ impl TestRig { let valid_payload = self .ee_a .execution_layer - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, finalized_block_hash, proposer_index, + None, + Slot::new(0), ) .await .unwrap() @@ -265,12 +267,14 @@ impl TestRig { let second_payload = self .ee_a .execution_layer - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, finalized_block_hash, proposer_index, + None, + Slot::new(0), ) .await .unwrap() @@ -400,7 +404,7 @@ impl TestRig { /// /// Panic if payload reconstruction fails. async fn check_payload_reconstruction( - ee: &ExecutionPair, + ee: &ExecutionPair, payload: &ExecutionPayload, ) { let reconstructed = ee From e5212f132021d58de9637355729096ccb511886e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 1 Jul 2022 03:44:37 +0000 Subject: [PATCH 09/15] Avoid growing Vec for sync committee indices (#3301) ## Issue Addressed NA ## Proposed Changes This is a fairly simple micro-optimization to avoid using `Vec::grow`. I don't believe this will have a substantial effect on block processing times, however it was showing up in flamegraphs. I think it's worth making this change for general memory-hygiene. ## Additional Info NA --- consensus/types/src/beacon_state.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 3a0f7d02e80..66656d35894 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -779,14 +779,14 @@ impl BeaconState { &mut self, sync_committee: &SyncCommittee, ) -> Result, Error> { - sync_committee - .pubkeys - .iter() - .map(|pubkey| { + let mut indices = Vec::with_capacity(sync_committee.pubkeys.len()); + for pubkey in sync_committee.pubkeys.iter() { + indices.push( self.get_validator_index(pubkey)? 
- .ok_or(Error::PubkeyCacheInconsistent) - }) - .collect() + .ok_or(Error::PubkeyCacheInconsistent)?, + ) + } + Ok(indices) } /// Compute the sync committee indices for the next sync committee.

From be4e261e7433e02983648f7d7d8f21f74d3fa9d8 Mon Sep 17 00:00:00 2001
From: Paul Hauner
Date: Sun, 3 Jul 2022 05:36:50 +0000
Subject: [PATCH 10/15] Use async code when interacting with EL (#3244)

## Overview

This rather extensive PR achieves two primary goals:

1. Uses the finalized/justified checkpoints of fork choice (FC), rather than those of the head state.
2. Refactors fork choice, block production and block processing into `async` functions.

Additionally, it achieves:

- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
  - I had to do this to deal with sending blocks into spawned tasks.
  - Previously we were cloning the beacon block at least 2 times during each block processing; these clones are either removed or turned into cheaper `Arc` clones.
  - We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
  - Avoids cloning *all the blocks* in *every chain segment* during sync.
  - It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.

For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273

## Changes to `canonical_head` and `fork_choice`

Previously, the `BeaconChain` had two separate fields:

```
canonical_head: RwLock<BeaconSnapshot>,
fork_choice: RwLock<BeaconForkChoice>
```

Now, we have grouped these values under a single struct:

```
canonical_head: CanonicalHead {
  cached_head: RwLock<Arc<CachedHead>>,
  fork_choice: RwLock<BeaconForkChoice>
}
```

Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.

## Breaking Changes

### The `state` (root) field in the `finalized_checkpoint` SSE event

Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might include in the `finalized_checkpoint` SSE event:

1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
2. The state root at `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.

Previously, Lighthouse would choose (2).
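For concreteness, here's a minimal sketch of the difference between the two candidates. The `BeaconState` and `per_slot_processing` below are simplified stand-ins for illustration, not the real Lighthouse implementations:

```rust
// Hypothetical, simplified stand-ins for illustration only.
#[derive(Clone)]
struct BeaconState {
    slot: u64,
    root: u64, // stand-in for the SSZ hash tree root
}

// Advancing through a skip slot mutates the state, and therefore its root.
fn per_slot_processing(state: &mut BeaconState) {
    state.slot += 1;
    state.root = state.root.wrapping_mul(31).wrapping_add(state.slot);
}

fn main() {
    // Suppose the finalized block sits at slot 60 and start_slot(n) == 64.
    let finalized_block_state = BeaconState { slot: 60, root: 0xdead_beef };
    let start_slot = 64;

    // Candidate (1): the root stored in the finalized block itself. Cheap.
    let candidate_1 = finalized_block_state.root;

    // Candidate (2): the state from (1), "skipped forward" through the empty
    // slots. Requires per-slot processing for each skipped slot.
    let mut state = finalized_block_state.clone();
    while state.slot < start_slot {
        per_slot_processing(&mut state);
    }
    let candidate_2 = state.root;

    assert_ne!(candidate_1, candidate_2);
    println!("(1) = {candidate_1:#x}, (2) = {candidate_2:#x}");
}
```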
However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341), which uses (1).

I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.

## Notes for Reviewers

I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice, and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.

I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering, so it's not "breaking".

I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached value doesn't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy, I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.

I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.

Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue; if anything, I think it'll be better, since the genesis alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.

You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:

- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test; not so efficient, but hopefully insignificant.

I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
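As a rough illustration of the `cached_head` change described above, here's a minimal sketch of why wrapping the snapshot in an `Arc` helps. The types are simplified stand-ins (std locks, toy fields), not the real Lighthouse definitions:

```rust
use std::sync::{Arc, RwLock};

// Simplified stand-ins for the real CachedHead/BeaconForkChoice types.
struct CachedHead {
    head_slot: u64,
}
struct BeaconForkChoice;

struct CanonicalHead {
    cached_head: RwLock<Arc<CachedHead>>,
    fork_choice: RwLock<BeaconForkChoice>,
}

impl CanonicalHead {
    /// Clone the `Arc` under the read lock, then drop the lock immediately.
    /// Callers never hold `cached_head` while they go on to use the snapshot,
    /// so they can't deadlock by also acquiring `fork_choice`.
    fn cached_head(&self) -> Arc<CachedHead> {
        self.cached_head.read().unwrap().clone()
    }
}

fn main() {
    let canonical_head = CanonicalHead {
        cached_head: RwLock::new(Arc::new(CachedHead { head_slot: 42 })),
        fork_choice: RwLock::new(BeaconForkChoice),
    };

    // The read lock is already released by the time we use the snapshot...
    let head = canonical_head.cached_head();
    // ...so taking the fork choice lock here is safe.
    let _fork_choice = canonical_head.fork_choice.read().unwrap();
    println!("head slot = {}", head.head_slot);
}
```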
Co-authored-by: Mac L --- Cargo.lock | 5 + .../src/attestation_verification.rs | 9 +- .../src/attestation_verification/batch.rs | 6 +- beacon_node/beacon_chain/src/beacon_chain.rs | 1951 +++++++---------- .../src/beacon_fork_choice_store.rs | 6 +- .../beacon_chain/src/beacon_proposer_cache.rs | 31 +- .../beacon_chain/src/beacon_snapshot.rs | 7 +- .../beacon_chain/src/block_verification.rs | 332 +-- beacon_node/beacon_chain/src/builder.rs | 72 +- .../beacon_chain/src/canonical_head.rs | 1307 +++++++++++ .../beacon_chain/src/early_attester_cache.rs | 7 +- beacon_node/beacon_chain/src/errors.rs | 19 +- .../beacon_chain/src/execution_payload.rs | 298 ++- beacon_node/beacon_chain/src/fork_revert.rs | 8 +- .../beacon_chain/src/historical_blocks.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 7 +- .../beacon_chain/src/proposer_prep_service.rs | 4 +- beacon_node/beacon_chain/src/schema_change.rs | 7 +- .../src/schema_change/migration_schema_v7.rs | 10 +- .../beacon_chain/src/shuffling_cache.rs | 6 + .../beacon_chain/src/snapshot_cache.rs | 15 +- .../beacon_chain/src/state_advance_timer.rs | 43 +- beacon_node/beacon_chain/src/test_utils.rs | 154 +- .../tests/attestation_production.rs | 53 +- .../tests/attestation_verification.rs | 104 +- .../beacon_chain/tests/block_verification.rs | 491 +++-- beacon_node/beacon_chain/tests/merge.rs | 33 +- .../beacon_chain/tests/op_verification.rs | 16 +- .../tests/payload_invalidation.rs | 398 ++-- beacon_node/beacon_chain/tests/store_tests.rs | 870 ++++---- .../tests/sync_committee_verification.rs | 64 +- beacon_node/beacon_chain/tests/tests.rs | 374 ++-- beacon_node/client/src/builder.rs | 34 +- beacon_node/client/src/notifier.rs | 143 +- beacon_node/execution_layer/src/lib.rs | 37 - beacon_node/http_api/src/attester_duties.rs | 8 +- beacon_node/http_api/src/block_id.rs | 41 +- beacon_node/http_api/src/database.rs | 2 +- beacon_node/http_api/src/lib.rs | 695 +++--- beacon_node/http_api/src/proposer_duties.rs | 26 +- beacon_node/http_api/src/state_id.rs | 40 +- beacon_node/http_api/tests/fork_tests.rs | 4 + .../http_api/tests/interactive_tests.rs | 29 +- beacon_node/http_api/tests/tests.rs | 219 +- .../lighthouse_network/src/behaviour/mod.rs | 4 +- .../src/rpc/codec/ssz_snappy.rs | 72 +- .../lighthouse_network/src/rpc/methods.rs | 5 +- .../lighthouse_network/src/types/pubsub.rs | 5 +- .../lighthouse_network/tests/rpc_tests.rs | 20 +- .../network/src/beacon_processor/mod.rs | 561 ++--- .../network/src/beacon_processor/tests.rs | 339 ++- .../beacon_processor/worker/gossip_methods.rs | 72 +- .../beacon_processor/worker/rpc_methods.rs | 7 +- .../beacon_processor/worker/sync_methods.rs | 52 +- beacon_node/network/src/metrics.rs | 4 - beacon_node/network/src/router/processor.rs | 46 +- beacon_node/network/src/service.rs | 41 +- beacon_node/network/src/status.rs | 38 +- .../network/src/subnet_service/tests/mod.rs | 10 +- .../network/src/sync/backfill_sync/mod.rs | 4 +- .../network/src/sync/block_lookups/mod.rs | 13 +- .../src/sync/block_lookups/parent_lookup.rs | 15 +- .../sync/block_lookups/single_block_lookup.rs | 7 +- .../network/src/sync/block_lookups/tests.rs | 38 +- beacon_node/network/src/sync/manager.rs | 35 +- .../network/src/sync/network_context.rs | 39 +- .../network/src/sync/peer_sync_info.rs | 4 +- .../network/src/sync/range_sync/batch.rs | 18 +- .../src/sync/range_sync/block_storage.rs | 2 +- .../network/src/sync/range_sync/chain.rs | 3 +- .../network/src/sync/range_sync/range.rs | 25 +- beacon_node/operation_pool/Cargo.toml | 1 + 
beacon_node/operation_pool/src/lib.rs | 40 +- beacon_node/store/src/hot_cold_store.rs | 6 +- beacon_node/store/src/lib.rs | 3 +- beacon_node/timer/src/lib.rs | 30 +- common/task_executor/Cargo.toml | 2 +- common/task_executor/src/lib.rs | 57 + common/task_executor/src/metrics.rs | 10 + consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 130 +- .../fork_choice/src/fork_choice_store.rs | 4 +- consensus/fork_choice/src/lib.rs | 5 +- consensus/fork_choice/tests/tests.rs | 403 ++-- consensus/proto_array/src/proto_array.rs | 1 + consensus/state_processing/Cargo.toml | 1 + .../src/per_block_processing/tests.rs | 322 ++- .../src/per_epoch_processing/tests.rs | 60 +- .../examples/flamegraph_beacon_state.rs | 2 +- consensus/types/Cargo.toml | 1 + consensus/types/src/beacon_block.rs | 46 +- consensus/types/src/beacon_block_body.rs | 47 + .../src/beacon_state/committee_cache/tests.rs | 40 +- consensus/types/src/beacon_state/tests.rs | 99 +- consensus/types/src/payload.rs | 2 + consensus/types/src/signed_beacon_block.rs | 8 + database_manager/src/lib.rs | 5 +- slasher/service/src/service.rs | 9 +- testing/ef_tests/src/cases.rs | 24 +- testing/ef_tests/src/cases/fork_choice.rs | 146 +- testing/ef_tests/src/handler.rs | 11 +- .../src/test_rig.rs | 7 +- testing/state_transition_vectors/Cargo.toml | 1 + testing/state_transition_vectors/src/exit.rs | 33 +- .../state_transition_vectors/src/macros.rs | 10 +- testing/state_transition_vectors/src/main.rs | 31 +- 106 files changed, 6521 insertions(+), 4544 deletions(-) create mode 100644 beacon_node/beacon_chain/src/canonical_head.rs diff --git a/Cargo.lock b/Cargo.lock index 1e9b5b42394..bb7308b9384 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2062,6 +2062,7 @@ dependencies = [ "eth2_ssz_derive", "proto_array", "store", + "tokio", "types", ] @@ -4173,6 +4174,7 @@ dependencies = [ "serde_derive", "state_processing", "store", + "tokio", "types", ] @@ -5972,6 +5974,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", + "tokio", "tree_hash", "types", ] @@ -5984,6 +5987,7 @@ dependencies = [ "eth2_ssz", "lazy_static", "state_processing", + "tokio", "types", ] @@ -6698,6 +6702,7 @@ dependencies = [ "swap_or_not_shuffle", "tempfile", "test_random_derive", + "tokio", "tree_hash", "tree_hash_derive", ] diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 406c0049aaa..63af6ab9e11 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -976,8 +976,8 @@ fn verify_head_block_is_known( max_skip_slots: Option, ) -> Result { let block_opt = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&attestation.data.beacon_block_root) .or_else(|| { chain @@ -1245,7 +1245,10 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. 
- if !chain.fork_choice.read().contains_block(&target.root) + if !chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&target.root) && !chain.early_attester_cache.contains_block(target.root) { return Err(Error::UnknownTargetRoot(target.root)); diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 30f1ae7e5be..6f76cce0246 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -65,7 +65,7 @@ where .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; + let fork = chain.canonical_head.cached_head().head_fork(); let mut signature_sets = Vec::with_capacity(num_indexed * 3); @@ -169,13 +169,13 @@ where &metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES, ); + let fork = chain.canonical_head.cached_head().head_fork(); + let pubkey_cache = chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; - let mut signature_sets = Vec::with_capacity(num_partially_verified); // Iterate, flattening to get only the `Ok` values. diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a64c9718755..9fb895f78f0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -9,15 +9,15 @@ use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, - signature_verify_chain_segment, BlockError, FullyVerifiedBlock, GossipVerifiedBlock, - IntoFullyVerifiedBlock, + signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::get_execution_payload; +use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; @@ -52,17 +52,17 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; -use eth2::types::{ - EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, -}; +use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; -use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation}; +use fork_choice::{ + AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, + InvalidationOperation, PayloadVerificationStatus, +}; use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use 
operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use proto_array::ExecutionStatus; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -71,7 +71,7 @@ use ssz::Encode; use state_processing::{ common::get_indexed_attestation, per_block_processing, - per_block_processing::{errors::AttestationValidationError, is_merge_transition_complete}, + per_block_processing::errors::AttestationValidationError, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, @@ -87,16 +87,17 @@ use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterato use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use task_executor::ShutdownReason; +use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::*; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; + pub type ForkChoiceError = fork_choice::Error; -/// The time-out before failure during an operation to take a read/write RwLock on the canonical -/// head. -pub const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// Alias to appease clippy. +type HashBlockTuple = (Hash256, Arc>); /// The time-out before failure during an operation to take a read/write RwLock on the block /// processing cache. @@ -216,22 +217,6 @@ pub enum StateSkipConfig { WithoutStateRoots, } -#[derive(Debug, PartialEq)] -pub struct HeadInfo { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub current_justified_checkpoint: types::Checkpoint, - pub finalized_checkpoint: types::Checkpoint, - pub fork: Fork, - pub genesis_time: u64, - pub genesis_validators_root: Hash256, - pub proposer_shuffling_decision_root: Hash256, - pub is_merge_transition_complete: bool, - pub execution_payload_block_hash: Option, - pub random: Hash256, -} - pub trait BeaconChainTypes: Send + Sync + 'static { type HotStore: store::ItemStore; type ColdStore: store::ItemStore; @@ -240,23 +225,22 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type EthSpec: types::EthSpec; } -/// Indicates the EL payload verification status of the head beacon block. -#[derive(Debug, PartialEq)] -pub enum HeadSafetyStatus { - /// The head block has either been verified by an EL or is does not require EL verification - /// (e.g., it is pre-merge or pre-terminal-block). - /// - /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with - /// the variant. - Safe(Option), - /// The head block execution payload has not yet been verified by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Unsafe(ExecutionBlockHash), - /// The head block execution payload was deemed to be invalid by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Invalid(ExecutionBlockHash), +/// Used internally to split block production into discrete functions. 
+struct PartialBeaconBlock { + state: BeaconState, + slot: Slot, + proposer_index: u64, + parent_root: Hash256, + randao_reveal: Signature, + eth1_data: Eth1Data, + graffiti: Graffiti, + proposer_slashings: Vec, + attester_slashings: Vec>, + attestations: Vec>, + deposits: Vec, + voluntary_exits: Vec, + sync_aggregate: Option>, + prepare_payload_handle: Option>, } pub type BeaconForkChoice = ForkChoice< @@ -284,6 +268,8 @@ pub struct BeaconChain { pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. pub store: BeaconStore, + /// Used for spawning async and blocking tasks. + pub task_executor: TaskExecutor, /// Database migrator for running background maintenance on the store. pub store_migrator: BackgroundMigrator, /// Reports the current slot, typically based upon the system clock. @@ -335,21 +321,21 @@ pub struct BeaconChain { pub eth1_chain: Option>, /// Interfaces with the execution client. pub execution_layer: Option>, - /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. - pub(crate) canonical_head: TimeoutRwLock>, + /// Stores information about the canonical head and finalized/justified checkpoints of the + /// chain. Also contains the fork choice struct, for computing the canonical head. + pub canonical_head: CanonicalHead, /// The root of the genesis block. pub genesis_block_root: Hash256, /// The root of the genesis state. pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, - /// A state-machine that is updated with information from the network and chooses a canonical - /// head block. - pub fork_choice: RwLock>, /// Transmitter used to indicate that slot-start fork choice has completed running. pub fork_choice_signal_tx: Option, /// Receiver used by block production to wait on slot-start fork choice. pub fork_choice_signal_rx: Option, + /// The genesis time of this `BeaconChain` (seconds since UNIX epoch). + pub genesis_time: u64, /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, @@ -358,7 +344,7 @@ pub struct BeaconChain { /// A cache dedicated to block processing. pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. - pub(crate) shuffling_cache: TimeoutRwLock, + pub shuffling_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -430,25 +416,11 @@ impl BeaconChain { .as_kv_store_op(BEACON_CHAIN_DB_KEY) } - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { - let fork_choice = self.fork_choice.read(); - Self::persist_fork_choice_in_batch_standalone(&fork_choice) - } - - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch_standalone( - fork_choice: &BeaconForkChoice, - ) -> KeyValueStoreOp { - let persisted_fork_choice = PersistedForkChoice { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) - } - /// Load fork choice from disk, returning `None` if it isn't found. 
- pub fn load_fork_choice(store: BeaconStore) -> Result>, Error> { + pub fn load_fork_choice( + store: BeaconStore, + spec: &ChainSpec, + ) -> Result>, Error> { let persisted_fork_choice = match store.get_item::(&FORK_CHOICE_DB_KEY)? { Some(fc) => fc, @@ -461,6 +433,7 @@ impl BeaconChain { Ok(Some(ForkChoice::from_persisted( persisted_fork_choice.fork_choice, fc_store, + spec, )?)) } @@ -538,11 +511,11 @@ impl BeaconChain { )); } - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state, + local_head.beacon_state.clone_with(CloneConfig::none()), local_head.beacon_block_root, &self.spec, )?; @@ -612,77 +585,6 @@ impl BeaconChain { .map(|result| result.map_err(|e| e.into()))) } - /// Iterate through the current chain to find the slot intersecting with the given beacon state. - /// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached - /// and no intersection is found, the finalized slot will be returned. - pub fn find_reorg_slot( - &self, - new_state: &BeaconState, - new_block_root: Hash256, - ) -> Result { - self.with_head(|snapshot| { - let old_state = &snapshot.beacon_state; - let old_block_root = snapshot.beacon_block_root; - - // The earliest slot for which the two chains may have a common history. - let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); - - // Create an iterator across `$state`, assuming that the block at `$state.slot` has the - // block root of `$block_root`. - // - // The iterator will be skipped until the next value returns `lowest_slot`. - // - // This is a macro instead of a function or closure due to the complex types invloved - // in all the iterator wrapping. - macro_rules! aligned_roots_iter { - ($state: ident, $block_root: ident) => { - std::iter::once(Ok(($state.slot(), $block_root))) - .chain($state.rev_iter_block_roots(&self.spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) - }; - } - - // Create iterators across old/new roots where iterators both start at the same slot. - let mut new_roots = aligned_roots_iter!(new_state, new_block_root); - let mut old_roots = aligned_roots_iter!(old_state, old_block_root); - - // Whilst *both* of the iterators are still returning values, try and find a common - // ancestor between them. - while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { - let (old_slot, old_root) = old?; - let (new_slot, new_root) = new?; - - // Sanity check to detect programming errors. - if old_slot != new_slot { - return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); - } - - if old_root == new_root { - // A common ancestor has been found. - return Ok(old_slot); - } - } - - // If no common ancestor is found, declare that the re-org happened at the previous - // finalized slot. - // - // Sometimes this will result in the return slot being *lower* than the actual reorg - // slot. However, assuming we don't re-org through a finalized slot, it will never be - // *higher*. - // - // We provide this potentially-inaccurate-but-safe information to avoid onerous - // database reads during times of deep reorgs. - Ok(old_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch())) - }) - } - /// Iterates backwards across all `(state_root, slot)` pairs starting from /// an arbitrary `BeaconState` to the earliest reachable ancestor (may or may not be genesis). 
/// @@ -713,12 +615,12 @@ impl BeaconChain { &self, start_slot: Slot, ) -> Result> + '_, Error> { - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state, + local_head.beacon_state.clone_with(CloneConfig::none()), &self.spec, )?; @@ -978,11 +880,11 @@ impl BeaconChain { pub async fn get_block_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result>>, Error> { if let Some(block) = self.early_attester_cache.get_block(*block_root) { return Ok(Some(block)); } - self.get_block(block_root).await + Ok(self.get_block(block_root).await?.map(Arc::new)) } /// Returns the block at the given root, if any. @@ -1068,53 +970,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Returns a `Checkpoint` representing the head block and state. Contains the "best block"; - /// the head of the canonical `BeaconChain`. - /// - /// It is important to note that the `beacon_state` returned may not match the present slot. It - /// is the state as it was when the head block was received, which could be some slots prior to - /// now. - pub fn head(&self) -> Result, Error> { - self.with_head(|head| Ok(head.clone_with(CloneConfig::committee_caches_only()))) - } - - /// Apply a function to the canonical head without cloning it. - pub fn with_head(&self, f: F) -> Result - where - E: From, - F: FnOnce(&BeaconSnapshot) -> Result, - { - let head_lock = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - f(&head_lock) - } - - /// Returns the beacon block root at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_block_root(&self) -> Result { - self.with_head(|s| Ok(s.beacon_block_root)) - } - - /// Returns the beacon block at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_block(&self) -> Result, Error> { - self.with_head(|s| Ok(s.beacon_block.clone())) - } - - /// Returns the beacon state at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_state(&self) -> Result, Error> { - self.with_head(|s| { - Ok(s.beacon_state - .clone_with(CloneConfig::committee_caches_only())) - }) - } - /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed @@ -1189,42 +1044,6 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } - /// Returns info representing the head block and state. - /// - /// A summarized version of `Self::head` that involves less cloning. - pub fn head_info(&self) -> Result { - self.with_head(|head| { - let proposer_shuffling_decision_root = head - .beacon_state - .proposer_shuffling_decision_root(head.beacon_block_root)?; - - // The `random` value is used whilst producing an `ExecutionPayload` atop the head. 
- let current_epoch = head.beacon_state.current_epoch(); - let random = *head.beacon_state.get_randao_mix(current_epoch)?; - - Ok(HeadInfo { - slot: head.beacon_block.slot(), - block_root: head.beacon_block_root, - state_root: head.beacon_state_root(), - current_justified_checkpoint: head.beacon_state.current_justified_checkpoint(), - finalized_checkpoint: head.beacon_state.finalized_checkpoint(), - fork: head.beacon_state.fork(), - genesis_time: head.beacon_state.genesis_time(), - genesis_validators_root: head.beacon_state.genesis_validators_root(), - proposer_shuffling_decision_root, - is_merge_transition_complete: is_merge_transition_complete(&head.beacon_state), - execution_payload_block_hash: head - .beacon_block - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()), - random, - }) - }) - } - /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. @@ -1245,7 +1064,7 @@ impl BeaconChain { slot: Slot, config: StateSkipConfig, ) -> Result, Error> { - let head_state = self.head()?.beacon_state; + let head_state = self.head_beacon_state_cloned(); match slot.cmp(&head_state.slot()) { Ordering::Equal => Ok(head_state), @@ -1330,14 +1149,6 @@ impl BeaconChain { self.state_at_slot(self.slot()?, StateSkipConfig::WithStateRoots) } - /// Returns the slot of the highest block in the canonical chain. - pub fn best_slot(&self) -> Result { - self.canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .map(|head| head.beacon_block.slot()) - .ok_or(Error::CanonicalHeadLockTimeout) - } - /// Returns the validator index (if any) for the given public key. /// /// ## Notes @@ -1477,7 +1288,7 @@ impl BeaconChain { validator_indices: &[u64], epoch: Epoch, head_block_root: Hash256, - ) -> Result<(Vec>, Hash256), Error> { + ) -> Result<(Vec>, Hash256, ExecutionStatus), Error> { self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| { let duties = validator_indices .iter() @@ -1487,7 +1298,13 @@ impl BeaconChain { }) .collect(); - Ok((duties, dependent_root)) + let execution_status = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; + + Ok((duties, dependent_root, execution_status)) }) } @@ -1535,8 +1352,8 @@ impl BeaconChain { ) -> Result, Error> { let beacon_block_root = attestation.data.beacon_block_root; match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { // The attestation references a block that is not in fork choice, it must be @@ -1624,7 +1441,10 @@ impl BeaconChain { let current_epoch_attesting_info: Option<(Checkpoint, usize)>; let attester_cache_key; let head_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS); - if let Some(head) = self.canonical_head.try_read_for(HEAD_LOCK_TIMEOUT) { + // The following braces are to prevent the `cached_head` Arc from being held for longer than + // required. It also helps reduce the diff for a very large PR (#3244). + { + let head = self.head_snapshot(); let head_state = &head.beacon_state; head_state_slot = head_state.slot(); @@ -1699,15 +1519,13 @@ impl BeaconChain { // routine. attester_cache_key = AttesterCacheKey::new(request_epoch, head_state, beacon_block_root)?; - } else { - return Err(Error::CanonicalHeadLockTimeout); } drop(head_timer); // Only attest to a block if it is fully verified (i.e. 
not optimistic or invalid). match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { Some(execution_status) if execution_status.is_valid_or_irrelevant() => (), @@ -1911,8 +1729,8 @@ impl BeaconChain { ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - self.fork_choice - .write() + self.canonical_head + .fork_choice_write_lock() .on_attestation( self.slot()?, verified.indexed_attestation(), @@ -2047,8 +1865,7 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { - let fork = - self.with_head(|head| Ok::<_, AttestationError>(head.beacon_state.fork()))?; + let fork = self.canonical_head.cached_head().head_fork(); self.op_pool .insert_attestation( @@ -2153,7 +1970,7 @@ impl BeaconChain { // pivot block is the same as the current state's pivot block. If it is, then the // attestation's shuffling is the same as the current state's. // To account for skipped slots, find the first block at *or before* the pivot slot. - let fork_choice_lock = self.fork_choice.read(); + let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); let pivot_block_root = fork_choice_lock .proto_array() .core_proto_array() @@ -2244,12 +2061,13 @@ impl BeaconChain { pub fn import_attester_slashing( &self, attester_slashing: SigVerifiedOp>, - ) -> Result<(), Error> { + ) { if self.eth1_chain.is_some() { - self.op_pool - .insert_attester_slashing(attester_slashing, self.head_info()?.fork) + self.op_pool.insert_attester_slashing( + attester_slashing, + self.canonical_head.cached_head().head_fork(), + ) } - Ok(()) } /// Attempt to obtain sync committee duties from the head. @@ -2265,22 +2083,36 @@ impl BeaconChain { }) } - /// Attempt to verify and import a chain of blocks to `self`. - /// - /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., - /// be a chain). An error will be returned if this is not the case. - /// - /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior - /// blocks might be imported. + /// A convenience method for spawning a blocking task. It maps an `Option` and + /// `tokio::JoinError` into a single `BeaconChainError`. + pub(crate) async fn spawn_blocking_handle( + &self, + task: F, + name: &'static str, + ) -> Result + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let handle = self + .task_executor + .spawn_blocking_handle(task, name) + .ok_or(Error::RuntimeShutdown)?; + + handle.await.map_err(Error::TokioJoin) + } + + /// Accepts a `chain_segment` and filters out any uninteresting blocks (e.g., pre-finalization + /// or already-known). /// - /// This method is generally much more efficient than importing each block using - /// `Self::process_block`. - pub fn process_chain_segment( + /// This method is potentially long-running and should not run on the core executor. + pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, - ) -> ChainSegmentResult { + chain_segment: Vec>>, + ) -> Result>, ChainSegmentResult> { + // This function will never import any blocks. + let imported_blocks = 0; let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); - let mut imported_blocks = 0; // Produce a list of the parent root and slot of the child of each block. 
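The new `spawn_blocking_handle` helper above moves CPU-heavy work off the async executor and folds the two failure modes (executor shutdown, task join failure) into a single error type. A rough equivalent with plain `tokio`; note that Lighthouse's `TaskExecutor` additionally reports shutdown via an `Option`, which plain `tokio::task::spawn_blocking` does not:

```rust
use tokio::task::JoinError;

#[derive(Debug)]
enum Error {
    /// Produced by Lighthouse's `TaskExecutor` when the runtime is shutting
    /// down; plain `spawn_blocking` has no equivalent, so this variant is
    /// unused in this sketch.
    #[allow(dead_code)]
    RuntimeShutdown,
    TokioJoin(JoinError),
}

/// Sketch: run a CPU-heavy closure off the async executor and fold the join
/// failure into one error type, in the spirit of
/// `BeaconChain::spawn_blocking_handle` above.
async fn spawn_blocking_handle<F, R>(task: F) -> Result<R, Error>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    tokio::task::spawn_blocking(task)
        .await
        .map_err(Error::TokioJoin)
}

#[tokio::main]
async fn main() -> Result<(), Error> {
    // Expensive work (e.g., batch signature verification) runs on the
    // blocking thread pool, keeping the core executor responsive.
    let sum = spawn_blocking_handle(|| (0u64..1_000_000).sum::<u64>()).await?;
    assert_eq!(sum, 499_999_500_000);
    Ok(())
}
```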
// @@ -2294,10 +2126,10 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. if let Err(e) = block.fork_name(&self.spec) { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), - }; + }); } let block_root = get_block_root(&block); @@ -2309,18 +2141,18 @@ impl BeaconChain { // Without this check it would be possible to have a block verified using the // incorrect shuffling. That would be bad, mmkay. if block_root != *child_parent_root { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearParentRoots, - }; + }); } // Ensure that the slots are strictly increasing throughout the chain segment. if *child_slot <= block.slot() { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearSlots, - }; + }); } } @@ -2348,18 +2180,18 @@ impl BeaconChain { // The block has a known parent that does not descend from the finalized block. // There is no need to process this block or any children. Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NotFinalizedDescendant { block_parent_root }, - }; + }); } // If there was an error whilst determining if the block was invalid, return that // error. Err(BlockError::BeaconChainError(e)) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::BeaconChainError(e), - }; + }); } // If the block was decided to be irrelevant for any other reason, don't include // this block or any of it's children in the filtered chain segment. @@ -2367,6 +2199,42 @@ impl BeaconChain { } } + Ok(filtered_chain_segment) + } + + /// Attempt to verify and import a chain of blocks to `self`. + /// + /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., + /// be a chain). An error will be returned if this is not the case. + /// + /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior + /// blocks might be imported. + /// + /// This method is generally much more efficient than importing each block using + /// `Self::process_block`. + pub async fn process_chain_segment( + self: &Arc, + chain_segment: Vec>>, + ) -> ChainSegmentResult { + let mut imported_blocks = 0; + + // Filter uninteresting blocks from the chain segment in a blocking task. + let chain = self.clone(); + let filtered_chain_segment_future = self.spawn_blocking_handle( + move || chain.filter_chain_segment(chain_segment), + "filter_chain_segment", + ); + let mut filtered_chain_segment = match filtered_chain_segment_future.await { + Ok(Ok(filtered_segment)) => filtered_segment, + Ok(Err(segment_result)) => return segment_result, + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + } + } + }; + while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. 
let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -2386,20 +2254,32 @@ impl BeaconChain { let mut blocks = filtered_chain_segment.split_off(last_index); std::mem::swap(&mut blocks, &mut filtered_chain_segment); + let chain = self.clone(); + let signature_verification_future = self.spawn_blocking_handle( + move || signature_verify_chain_segment(blocks, &chain), + "signature_verify_chain_segment", + ); + // Verify the signature of the blocks, returning early if the signature is invalid. - let signature_verified_blocks = match signature_verify_chain_segment(blocks, self) { - Ok(blocks) => blocks, - Err(error) => { + let signature_verified_blocks = match signature_verification_future.await { + Ok(Ok(blocks)) => blocks, + Ok(Err(error)) => { return ChainSegmentResult::Failed { imported_blocks, error, }; } + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + }; + } }; // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { - match self.process_block(signature_verified_block) { + match self.process_block(signature_verified_block).await { Ok(_) => imported_blocks += 1, Err(error) => { return ChainSegmentResult::Failed { @@ -2424,43 +2304,54 @@ impl BeaconChain { /// ## Errors /// /// Returns an `Err` if the given block was invalid, or an error was encountered during - pub fn verify_block_for_gossip( - &self, - block: SignedBeaconBlock, + pub async fn verify_block_for_gossip( + self: &Arc, + block: Arc>, ) -> Result, BlockError> { - let slot = block.slot(); - let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); - - match GossipVerifiedBlock::new(block, self) { - Ok(verified) => { - debug!( - self.log, - "Successfully processed gossip block"; - "graffiti" => graffiti_string, - "slot" => slot, - "root" => ?verified.block_root(), - ); + let chain = self.clone(); + self.task_executor + .clone() + .spawn_blocking_handle( + move || { + let slot = block.slot(); + let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); + + match GossipVerifiedBlock::new(block, &chain) { + Ok(verified) => { + debug!( + chain.log, + "Successfully processed gossip block"; + "graffiti" => graffiti_string, + "slot" => slot, + "root" => ?verified.block_root(), + ); - Ok(verified) - } - Err(e) => { - debug!( - self.log, - "Rejected gossip block"; - "error" => e.to_string(), - "graffiti" => graffiti_string, - "slot" => slot, - ); + Ok(verified) + } + Err(e) => { + debug!( + chain.log, + "Rejected gossip block"; + "error" => e.to_string(), + "graffiti" => graffiti_string, + "slot" => slot, + ); - Err(e) - } - } + Err(e) + } + } + }, + "payload_verification_handle", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)? } /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. /// - /// Items that implement `IntoFullyVerifiedBlock` include: + /// Items that implement `IntoExecutionPendingBlock` include: /// /// - `SignedBeaconBlock` /// - `GossipVerifiedBlock` @@ -2469,7 +2360,7 @@ impl BeaconChain { /// /// Returns an `Err` if the given block was invalid, or an error was encountered during /// verification. - pub fn process_block>( + pub async fn process_block>( self: &Arc, unverified_block: B, ) -> Result> { @@ -2483,13 +2374,16 @@ impl BeaconChain { let block = unverified_block.block().clone(); // A small closure to group the verification and import errors. 
- let import_block = |unverified_block: B| -> Result> { - let fully_verified = unverified_block.into_fully_verified_block(self)?; - self.import_block(fully_verified) + let chain = self.clone(); + let import_block = async move { + let execution_pending = unverified_block.into_execution_pending_block(&chain)?; + chain + .import_execution_pending_block(execution_pending) + .await }; // Verify and import the block. - match import_block(unverified_block) { + match import_block.await { // The block was successfully verified and imported. Yay. Ok(block_root) => { trace!( @@ -2504,6 +2398,14 @@ impl BeaconChain { Ok(block_root) } + Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { + debug!( + self.log, + "Beacon block processing cancelled"; + "error" => ?e, + ); + Err(e) + } // There was an error whilst attempting to verify and import the block. The block might // be partially verified or partially imported. Err(BlockError::BeaconChainError(e)) => { @@ -2526,6 +2428,81 @@ impl BeaconChain { } } + /// Accepts a fully-verified block and imports it into the chain without performing any + /// additional verification. + /// + /// An error is returned if the block was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). + async fn import_execution_pending_block( + self: Arc, + execution_pending_block: ExecutionPendingBlock, + ) -> Result> { + let ExecutionPendingBlock { + block, + block_root, + state, + parent_block: _, + confirmed_state_roots, + payload_verification_handle, + } = execution_pending_block; + + let PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + } = payload_verification_handle + .await + .map_err(BeaconChainError::TokioJoin)? + .ok_or(BeaconChainError::RuntimeShutdown)??; + + // Log the PoS pandas if a merge transition just occurred. + if is_valid_merge_transition_block { + info!(self.log, "{}", POS_PANDA_BANNER); + info!( + self.log, + "Proof of Stake Activated"; + "slot" => block.slot() + ); + info!( + self.log, ""; + "Terminal POW Block Hash" => ?block + .message() + .execution_payload()? + .parent_hash() + .into_root() + ); + info!( + self.log, ""; + "Merge Transition Block Root" => ?block.message().tree_hash_root() + ); + info!( + self.log, ""; + "Merge Transition Execution Hash" => ?block + .message() + .execution_payload()? + .block_hash() + .into_root() + ); + } + + let chain = self.clone(); + let block_hash = self + .spawn_blocking_handle( + move || { + chain.import_block( + block, + block_root, + state, + confirmed_state_roots, + payload_verification_status, + ) + }, + "payload_verification_handle", + ) + .await??; + + Ok(block_hash) + } + /// Accepts a fully-verified block and imports it into the chain without performing any /// additional verification. /// @@ -2533,15 +2510,14 @@ impl BeaconChain { /// (i.e., this function is not atomic). 
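`import_execution_pending_block` above awaits a `payload_verification_handle` that was spawned when the block entered verification, so the execution-layer payload check overlaps with the rest of import. A sketch of that start-early, await-late shape with hypothetical types (`ExecutionPendingBlock` here is a stand-in, not the real struct):

```rust
use tokio::task::JoinHandle;

#[derive(Debug)]
enum BlockError {
    TokioJoin,
}

/// Hypothetical pending-block holder: payload verification was started when
/// the block entered processing and is only awaited at import time.
struct ExecutionPendingBlock {
    block_root: u64,
    payload_verification_handle: JoinHandle<Result<(), BlockError>>,
}

fn start_payload_verification(block_root: u64) -> ExecutionPendingBlock {
    let handle: JoinHandle<Result<(), BlockError>> = tokio::spawn(async move {
        // Stand-in for the round-trip to the execution layer.
        tokio::task::yield_now().await;
        let _ = block_root;
        Ok(())
    });
    ExecutionPendingBlock {
        block_root,
        payload_verification_handle: handle,
    }
}

async fn import_block(pending: ExecutionPendingBlock) -> Result<u64, BlockError> {
    // Consensus-side import work can proceed here while the execution layer
    // verifies the payload concurrently; the handle is awaited last.
    pending
        .payload_verification_handle
        .await
        .map_err(|_| BlockError::TokioJoin)??;
    Ok(pending.block_root)
}

#[tokio::main]
async fn main() {
    let pending = start_payload_verification(42);
    let root = import_block(pending).await.expect("import succeeds");
    assert_eq!(root, 42);
}
```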
fn import_block(
        &self,
-        fully_verified_block: FullyVerifiedBlock,
+        signed_block: Arc>,
+        block_root: Hash256,
+        mut state: BeaconState,
+        confirmed_state_roots: Vec,
+        payload_verification_status: PayloadVerificationStatus,
     ) -> Result> {
-        let signed_block = fully_verified_block.block;
-        let block_root = fully_verified_block.block_root;
-        let mut state = fully_verified_block.state;
         let current_slot = self.slot()?;
         let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
-        let mut ops = fully_verified_block.confirmation_db_batch;
-        let payload_verification_status = fully_verified_block.payload_verification_status;

         let attestation_observation_timer =
             metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
@@ -2614,21 +2590,24 @@ impl BeaconChain {
             .map_err(BeaconChainError::from)?;
         }

-        let mut fork_choice = self.fork_choice.write();
-
-        // Do not import a block that doesn't descend from the finalized root.
-        let signed_block =
-            check_block_is_finalized_descendant::(signed_block, &fork_choice, &self.store)?;
-        let (block, block_signature) = signed_block.clone().deconstruct();
-
-        // compare the existing finalized checkpoint with the incoming block's finalized checkpoint
-        let old_finalized_checkpoint = fork_choice.finalized_checkpoint();
-        let new_finalized_checkpoint = state.finalized_checkpoint();
+        // Alias for readability.
+        let block = signed_block.message();

         // Only perform the weak subjectivity check if it was configured.
         if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint {
+            // Note: we're using the finalized checkpoint from the head state, rather than fork
+            // choice.
+            //
+            // We are doing this to ensure that we detect changes in finalization. It's possible
+            // that fork choice has already been updated to the finalized checkpoint in the block
+            // we're importing.
+            let current_head_finalized_checkpoint =
+                self.canonical_head.cached_head().finalized_checkpoint();
+            // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint.
+            let new_finalized_checkpoint = state.finalized_checkpoint();
+
             // This ensures we only perform the check once.
-            if (old_finalized_checkpoint.epoch < wss_checkpoint.epoch)
+            if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch)
                 && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch)
             {
                 if let Err(e) =
@@ -2640,7 +2619,7 @@ impl BeaconChain {
                         "Weak subjectivity checkpoint verification failed while importing block!";
                         "block_root" => ?block_root,
                         "parent_root" => ?block.parent_root(),
-                        "old_finalized_epoch" => ?old_finalized_checkpoint.epoch,
+                        "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch,
                         "new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
                         "weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
                         "error" => ?e,
@@ -2656,6 +2635,13 @@ impl BeaconChain {
             }
         }

+        // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks
+        // by avoiding taking other locks whilst holding this lock.
+        let mut fork_choice = self.canonical_head.fork_choice_write_lock();
+
+        // Do not import a block that doesn't descend from the finalized root.
+        check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?;
+
         // Register the new block with the fork choice service.
{ let _fork_choice_block_timer = @@ -2668,7 +2654,7 @@ impl BeaconChain { fork_choice .on_block( current_slot, - &block, + block, block_root, block_delay, &state, @@ -2843,7 +2829,11 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - ops.push(StoreOp::PutBlock(block_root, Box::new(signed_block))); + let mut ops: Vec<_> = confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag) + .collect(); + ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); @@ -2854,18 +2844,23 @@ impl BeaconChain { "msg" => "Restoring fork choice from disk", "error" => ?e, ); - match Self::load_fork_choice(self.store.clone())? { - Some(persisted_fork_choice) => { - *fork_choice = persisted_fork_choice; - } - None => { - crit!( - self.log, - "No stored fork choice found to restore from"; - "warning" => "The database is likely corrupt now, consider --purge-db" - ); - } + + // Since the write failed, try to revert the canonical head back to what was stored + // in the database. This attempts to prevent inconsistency between the database and + // fork choice. + if let Err(e) = + self.canonical_head + .restore_from_store(fork_choice, &self.store, &self.spec) + { + crit!( + self.log, + "No stored fork choice found to restore from"; + "error" => ?e, + "warning" => "The database is likely corrupt now, consider --purge-db" + ); + return Err(BlockError::BeaconChainError(e)); } + return Err(e.into()); } drop(txn_lock); @@ -2880,7 +2875,6 @@ impl BeaconChain { let parent_root = block.parent_root(); let slot = block.slot(); - let signed_block = SignedBeaconBlock::from_block(block, block_signature); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) @@ -3017,7 +3011,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. - pub fn produce_block>( + pub async fn produce_block>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -3029,16 +3023,51 @@ impl BeaconChain { validator_graffiti, ProduceBlockVerification::VerifyRandao, ) + .await } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. - pub fn produce_block_with_verification>( + pub async fn produce_block_with_verification>( self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/2 (blocking) + // + // Load the parent state from disk. + let chain = self.clone(); + let (state, state_root_opt) = self + .task_executor + .spawn_blocking_handle( + move || chain.load_state_for_block_production::(slot), + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/2 (async, with some blocking components) + // + // Produce the block upon the state + self.produce_block_on_state::( + state, + state_root_opt, + slot, + randao_reveal, + validator_graffiti, + verification, + ) + .await + } + + /// Load a beacon state from the database for block production. This is a long-running process + /// that should not be performed in an `async` context. 
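The hunk above also changes failure handling: if the batched database write fails, the in-memory canonical head is restored from the persisted copy (`restore_from_store`) rather than being left ahead of disk. A self-contained sketch of that rollback invariant, with hypothetical types standing in for the real store and fork choice:

```rust
#[derive(Clone, Debug, PartialEq)]
struct ForkChoice {
    blocks: Vec<u64>,
}

/// Hypothetical store; the persisted copy is the source of truth.
struct Store {
    persisted: ForkChoice,
    fail_next_write: bool,
}

impl Store {
    fn write_batch(&mut self, fork_choice: &ForkChoice) -> Result<(), &'static str> {
        if self.fail_next_write {
            return Err("database write failed");
        }
        self.persisted = fork_choice.clone();
        Ok(())
    }
}

/// Sketch: mutate the in-memory fork choice, attempt to persist it, and on
/// failure restore the in-memory copy from disk so that memory never runs
/// ahead of the database.
fn import_block(
    in_memory: &mut ForkChoice,
    store: &mut Store,
    block_root: u64,
) -> Result<(), &'static str> {
    in_memory.blocks.push(block_root);
    if let Err(e) = store.write_batch(in_memory) {
        // Roll back: re-load the last state known to be on disk.
        *in_memory = store.persisted.clone();
        return Err(e);
    }
    Ok(())
}

fn main() {
    let mut store = Store {
        persisted: ForkChoice { blocks: vec![] },
        fail_next_write: true,
    };
    let mut fork_choice = store.persisted.clone();
    assert!(import_block(&mut fork_choice, &mut store, 1).is_err());
    // The failed import left no trace in memory.
    assert_eq!(fork_choice, store.persisted);
}
```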
+ fn load_state_for_block_production>( + self: &Arc, + slot: Slot, + ) -> Result<(BeaconState, Option), BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); @@ -3052,16 +3081,19 @@ impl BeaconChain { // signed. If we miss the cache or we're producing a block that conflicts with the head, // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); - let head_info = self - .head_info() - .map_err(BlockProductionError::UnableToGetHeadInfo)?; - let (state, state_root_opt) = if head_info.slot < slot { + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any + // longer than necessary. + let (head_slot, head_block_root) = { + let head = self.canonical_head.cached_head(); + (head.head_slot(), head.head_block_root()) + }; + let (state, state_root_opt) = if head_slot < slot { // Normal case: proposing a block atop the current head. Use the snapshot cache. if let Some(pre_state) = self .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(head_info.block_root) + snapshot_cache.get_state_for_block_production(head_block_root) }) { (pre_state.pre_state, pre_state.state_root) @@ -3091,16 +3123,10 @@ impl BeaconChain { (state, None) }; + drop(state_load_timer); - self.produce_block_on_state::( - state, - state_root_opt, - slot, - randao_reveal, - validator_graffiti, - verification, - ) + Ok((state, state_root_opt)) } /// Produce a block for some `slot` upon the given `state`. @@ -3115,15 +3141,79 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub fn produce_block_on_state>( - &self, - mut state: BeaconState, + pub async fn produce_block_on_state>( + self: &Arc, + state: BeaconState, state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/3 (blocking) + // + // Perform the state advance and block-packing functions. + let chain = self.clone(); + let mut partial_beacon_block = self + .task_executor + .spawn_blocking_handle( + move || { + chain.produce_partial_beacon_block( + state, + state_root_opt, + produce_at_slot, + randao_reveal, + validator_graffiti, + ) + }, + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/3 (async) + // + // Wait for the execution layer to return an execution payload (if one is required). + let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); + let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { + let execution_payload = prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??; + Some(execution_payload) + } else { + None + }; + + // Part 3/3 (blocking) + // + // Perform the final steps of combining all the parts and computing the state root. 
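`produce_block_on_state` is now an explicit three-phase pipeline: a blocking phase for the state advance and operation packing, an async phase that waits on the execution payload, and a final blocking phase for assembly and the state root. A minimal sketch of that pipeline shape in plain `tokio` (hypothetical types; the real phases carry far more state):

```rust
use tokio::task::JoinHandle;

/// Hypothetical partial block: everything from phase one plus the in-flight
/// payload request.
struct PartialBlock {
    attestations: Vec<u64>,
    payload_handle: Option<JoinHandle<u64>>,
}

/// Part 1 (blocking): CPU-heavy packing work. The payload request is already
/// in flight, so the execution layer works concurrently with this phase.
fn produce_partial_block() -> PartialBlock {
    let payload_handle = Some(tokio::spawn(async {
        // Stand-in for the execution layer returning a payload.
        tokio::task::yield_now().await;
        0xfeed_u64
    }));
    PartialBlock {
        attestations: vec![1, 2, 3],
        payload_handle,
    }
}

/// Part 3 (blocking): combine the pieces into the final block.
fn complete_block(partial: PartialBlock, payload: Option<u64>) -> (Vec<u64>, Option<u64>) {
    (partial.attestations, payload)
}

#[tokio::main]
async fn main() {
    // Part 1: run the blocking phase off the async executor.
    let mut partial = tokio::task::spawn_blocking(produce_partial_block)
        .await
        .expect("join");
    // Part 2 (async): wait for the execution payload, if one was requested.
    let payload = match partial.payload_handle.take() {
        Some(handle) => Some(handle.await.expect("join")),
        None => None,
    };
    // Part 3: final assembly, again off the async executor.
    let block = tokio::task::spawn_blocking(move || complete_block(partial, payload))
        .await
        .expect("join");
    assert_eq!(block.1, Some(0xfeed));
}
```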
+ let chain = self.clone(); + self.task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + execution_payload, + verification, + ) + }, + "complete_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)? + } + + fn produce_partial_beacon_block>( + self: &Arc, + mut state: BeaconState, + state_root_opt: Option, + produce_at_slot: Slot, + randao_reveal: Signature, + validator_graffiti: Option, + ) -> Result, BlockProductionError> { let eth1_chain = self .eth1_chain .as_ref() @@ -3154,13 +3244,35 @@ impl BeaconChain { state.latest_block_header().canonical_root() }; + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + + let pubkey_opt = state + .validators() + .get(proposer_index as usize) + .map(|v| v.pubkey); + + // If required, start the process of loading an execution payload from the EL early. This + // allows it to run concurrently with things like attestation packing. + let prepare_payload_handle = match &state { + BeaconState::Base(_) | BeaconState::Altair(_) => None, + BeaconState::Merge(_) => { + let finalized_checkpoint = self.canonical_head.cached_head().finalized_checkpoint(); + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + finalized_checkpoint, + proposer_index, + pubkey_opt, + )?; + Some(prepare_payload_handle) + } + }; + let (proposer_slashings, attester_slashings, voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; - let deposits = eth1_chain - .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? - .into(); + let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. @@ -3209,21 +3321,16 @@ impl BeaconChain { curr_attestation_filter, &self.spec, ) - .map_err(BlockProductionError::OpPoolError)? - .into(); + .map_err(BlockProductionError::OpPoolError)?; drop(attestation_packing_timer); let slot = state.slot(); let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; - let pubkey_opt = state - .validators() - .get(proposer_index as usize) - .map(|v| v.pubkey); - - // Closure to fetch a sync aggregate in cases where it is required. - let get_sync_aggregate = || -> Result, BlockProductionError> { - Ok(self + let sync_aggregate = if matches!(&state, BeaconState::Base(_)) { + None + } else { + let sync_aggregate = self .op_pool .get_sync_aggregate(&state) .map_err(BlockProductionError::OpPoolError)? 
@@ -3234,9 +3341,54 @@ impl BeaconChain { "slot" => state.slot(), ); SyncAggregate::new() - })) + }); + Some(sync_aggregate) }; + Ok(PartialBeaconBlock { + state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + prepare_payload_handle, + }) + } + + fn complete_partial_beacon_block>( + &self, + partial_beacon_block: PartialBeaconBlock, + execution_payload: Option, + verification: ProduceBlockVerification, + ) -> Result, BlockProductionError> { + let PartialBeaconBlock { + mut state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + // We don't need the prepare payload handle since the `execution_payload` is passed into + // this function. We can assume that the handle has already been consumed in order to + // produce said `execution_payload`. + prepare_payload_handle: _, + } = partial_beacon_block; + let inner_block = match &state { BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { slot, @@ -3249,56 +3401,51 @@ impl BeaconChain { graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), - attestations, - deposits, + attestations: attestations.into(), + deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), _phantom: PhantomData, }, }), - BeaconState::Altair(_) => { - let sync_aggregate = get_sync_aggregate()?; - BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - _phantom: PhantomData, - }, - }) - } - BeaconState::Merge(_) => { - let sync_aggregate = get_sync_aggregate()?; - let execution_payload = - get_execution_payload::(self, &state, proposer_index, pubkey_opt)?; - BeaconBlock::Merge(BeaconBlockMerge { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - execution_payload, - }, - }) - } + BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + _phantom: PhantomData, + }, + }), + BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + 
sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: execution_payload + .ok_or(BlockProductionError::MissingExecutionPayload)?, + }, + }), }; let block = SignedBeaconBlock::from_block( @@ -3362,7 +3509,7 @@ impl BeaconChain { /// results in the justified checkpoint being invalidated. /// /// See the documentation of `InvalidationOperation` for information about defining `op`. - pub fn process_invalid_execution_payload( + pub async fn process_invalid_execution_payload( self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { @@ -3373,8 +3520,26 @@ impl BeaconChain { "block_root" => ?op.block_root(), ); + // Update the execution status in fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let inner_op = op.clone(); + let fork_choice_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_invalid_execution_payload(&inner_op) + }, + "invalid_payload_fork_choice_update", + ) + .await?; + // Update fork choice. - if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) { + if let Err(e) = fork_choice_result { crit!( self.log, "Failed to process invalid payload"; @@ -3389,7 +3554,7 @@ impl BeaconChain { // // Don't return early though, since invalidating the justified checkpoint might cause an // error here. - if let Err(e) = self.fork_choice() { + if let Err(e) = self.recompute_head_at_current_slot().await { crit!( self.log, "Failed to run fork choice routine"; @@ -3397,8 +3562,22 @@ impl BeaconChain { ); } - // Atomically obtain the justified root from fork choice. - let justified_block = self.fork_choice.read().get_justified_block()?; + // Obtain the justified root from fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let justified_block = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_read_lock() + .get_justified_block() + }, + "invalid_payload_fork_choice_get_justified", + ) + .await??; if justified_block.execution_status.is_invalid() { crit!( @@ -3430,452 +3609,10 @@ impl BeaconChain { Ok(()) } - /// Execute the fork choice algorithm and enthrone the result as the canonical head. - pub fn fork_choice(self: &Arc) -> Result<(), Error> { - self.fork_choice_at_slot(self.slot()?) - } - - /// Execute fork choice at `slot`, processing queued attestations from `slot - 1` and earlier. - /// - /// The `slot` is not verified in any way, callers should ensure it corresponds to at most - /// one slot ahead of the current wall-clock slot. - pub fn fork_choice_at_slot(self: &Arc, slot: Slot) -> Result<(), Error> { - metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); - let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); - - let result = self.fork_choice_internal(slot); - - if result.is_err() { - metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); - } - - result - } - - fn fork_choice_internal(self: &Arc, slot: Slot) -> Result<(), Error> { - // Atomically obtain the head block root and the finalized block. - let (beacon_block_root, finalized_block) = { - let mut fork_choice = self.fork_choice.write(); - - // Determine the root of the block that is the head of the chain. 
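`process_invalid_execution_payload` above follows a fixed order: record the invalidation in fork choice, recompute the head (which must move off the invalidated branch), then check whether the justified block itself became invalid, which is fatal. A toy illustration of that ordering, with hypothetical types in place of the real fork choice:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum ExecutionStatus {
    Valid,
    Invalid,
}

/// Hypothetical fork choice tracking per-block execution status; blocks are
/// identified by their index for brevity.
struct ForkChoice {
    statuses: Vec<ExecutionStatus>,
    justified: usize,
}

impl ForkChoice {
    fn on_invalid_execution_payload(&mut self, block: usize) {
        self.statuses[block] = ExecutionStatus::Invalid;
    }

    /// Stand-in for re-running the head computation after an invalidation:
    /// here, the head is simply the highest block that is still valid.
    fn recompute_head(&self) -> usize {
        self.statuses
            .iter()
            .rposition(|s| *s == ExecutionStatus::Valid)
            .unwrap_or(0)
    }
}

fn main() {
    let mut fc = ForkChoice {
        statuses: vec![ExecutionStatus::Valid; 4],
        justified: 1,
    };
    // 1. Record the invalidation reported by the execution layer.
    fc.on_invalid_execution_payload(3);
    // 2. Recompute the head; it must move off the invalidated block.
    assert_eq!(fc.recompute_head(), 2);
    // 3. If even the justified block is invalid the node cannot continue
    //    safely; the code above initiates shutdown in that case.
    assert_ne!(fc.statuses[fc.justified], ExecutionStatus::Invalid);
}
```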
- let beacon_block_root = fork_choice.get_head(slot, &self.spec)?; - - (beacon_block_root, fork_choice.get_finalized_block()?) - }; - - let current_head = self.head_info()?; - let old_finalized_checkpoint = current_head.finalized_checkpoint; - - // Exit early if the head hasn't changed. - if beacon_block_root == current_head.block_root { - return Ok(()); - } - - // Check to ensure that this finalized block hasn't been marked as invalid. - if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { - crit!( - self.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block_hash - ); - let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Finalized block has an invalid execution payload.", - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; - - // Exit now, the node is in an invalid state. - return Err(Error::InvalidFinalizedPayload { - finalized_root: finalized_block.root, - execution_block_hash: block_hash, - }); - } - - let lag_timer = metrics::start_timer(&metrics::FORK_CHOICE_SET_HEAD_LAG_TIMES); - - // At this point we know that the new head block is not the same as the previous one - metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - - // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling - // back to a database read if that fails. - let new_head = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned(beacon_block_root, CloneConfig::committee_caches_only()) - }) - .map::, _>(Ok) - .unwrap_or_else(|| { - let beacon_block = self - .store - .get_full_block(&beacon_block_root)? - .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; - - Ok(BeaconSnapshot { - beacon_block, - beacon_block_root, - beacon_state, - }) - }) - .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. - snapshot - .beacon_state - .build_all_committee_caches(&self.spec) - .map_err(Into::into) - .map(|()| snapshot) - })?; - - // Attempt to detect if the new head is not on the same chain as the previous block - // (i.e., a re-org). - // - // Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks - // between calls to fork choice without swapping between chains. This seems like an - // extreme-enough scenario that a warning is fine. 
- let is_reorg = new_head - .beacon_state - .get_block_root(current_head.slot) - .map_or(true, |root| *root != current_head.block_root); - - let mut reorg_distance = Slot::new(0); - - if is_reorg { - match self.find_reorg_slot(&new_head.beacon_state, new_head.beacon_block_root) { - Ok(slot) => reorg_distance = current_head.slot.saturating_sub(slot), - Err(e) => { - warn!( - self.log, - "Could not find re-org depth"; - "error" => format!("{:?}", e), - ); - } - } - - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); - warn!( - self.log, - "Beacon chain re-org"; - "previous_head" => ?current_head.block_root, - "previous_slot" => current_head.slot, - "new_head_parent" => ?new_head.beacon_block.parent_root(), - "new_head" => ?beacon_block_root, - "new_slot" => new_head.beacon_block.slot(), - "reorg_distance" => reorg_distance, - ); - } else { - debug!( - self.log, - "Head beacon block"; - "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, - "justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, - "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => ?beacon_block_root, - "slot" => new_head.beacon_block.slot(), - ); - }; - - let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint(); - - // It is an error to try to update to a head with a lesser finalized epoch. - if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch { - return Err(Error::RevertedFinalizedEpoch { - previous_epoch: old_finalized_checkpoint.epoch, - new_epoch: new_finalized_checkpoint.epoch, - }); - } - - let is_epoch_transition = current_head.slot.epoch(T::EthSpec::slots_per_epoch()) - < new_head - .beacon_state - .slot() - .epoch(T::EthSpec::slots_per_epoch()); - - let update_head_timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); - - // These fields are used for server-sent events. - let state_root = new_head.beacon_state_root(); - let head_slot = new_head.beacon_state.slot(); - let head_proposer_index = new_head.beacon_block.message().proposer_index(); - let proposer_graffiti = new_head - .beacon_block - .message() - .body() - .graffiti() - .as_utf8_lossy(); - - // Find the dependent roots associated with this head before updating the snapshot. This - // is to ensure consistency when sending server sent events later in this method. - let dependent_root = new_head - .beacon_state - .proposer_shuffling_decision_root(self.genesis_block_root); - let prev_dependent_root = new_head - .beacon_state - .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - - drop(lag_timer); - - // Clear the early attester cache in case it conflicts with `self.canonical_head`. - self.early_attester_cache.clear(); - - // Update the snapshot that stores the head of the chain at the time it received the - // block. - *self - .canonical_head - .try_write_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)? = new_head; - - // The block has now been set as head so we can record times and delays. - metrics::stop_timer(update_head_timer); - - let block_time_set_as_head = timestamp_now(); - - // Calculate the total delay between the start of the slot and when it was set as head. 
- let block_delay_total = - get_slot_delay_ms(block_time_set_as_head, head_slot, &self.slot_clock); - - // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to - // the cache during sync. - if block_delay_total < self.slot_clock.slot_duration() * 64 { - self.block_times_cache.write().set_time_set_as_head( - beacon_block_root, - current_head.slot, - block_time_set_as_head, - ); - } - - // If a block comes in from over 4 slots ago, it is most likely a block from sync. - let block_from_sync = block_delay_total > self.slot_clock.slot_duration() * 4; - - // Determine whether the block has been set as head too late for proper attestation - // production. - let late_head = block_delay_total >= self.slot_clock.unagg_attestation_production_delay(); - - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during - // sync. - if !block_from_sync { - // Observe the total block delay. This is the delay between the time the slot started - // and when the block was set as head. - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, - block_delay_total, - ); - - // Observe the delay between when we imported the block and when we set the block as - // head. - let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, - block_delays - .observed - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, - block_delays - .set_as_head - .unwrap_or_else(|| Duration::from_secs(0)), - ); - - // If the block was enshrined as head too late for attestations to be created for it, - // log a debug warning and increment a metric. - if late_head { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); - debug!( - self.log, - "Delayed head block"; - "block_root" => ?beacon_block_root, - "proposer_index" => head_proposer_index, - "slot" => head_slot, - "block_delay" => ?block_delay_total, - "observed_delay" => ?block_delays.observed, - "imported_delay" => ?block_delays.imported, - "set_as_head_delay" => ?block_delays.set_as_head, - ); - } - } - - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.update_head(beacon_block_root); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "update head" - ); - }); - - if is_epoch_transition || is_reorg { - self.persist_head_and_fork_choice()?; - self.op_pool.prune_attestations(self.epoch()?); - } - - if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { - // Due to race conditions, it's technically possible that the head we load here is - // different to the one earlier in this function. - // - // Since the head can't move backwards in terms of finalized epoch, we can only load a - // head with a *later* finalized state. There is no harm in this. - let head = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - - // State root of the finalized state on the epoch boundary, NOT the state - // of the finalized block. We need to use an iterator in case the state is beyond - // the reach of the new head's `state_roots` array. 
- let new_finalized_slot = head - .beacon_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let new_finalized_state_root = process_results( - StateRootsIterator::new(&self.store, &head.beacon_state), - |mut iter| { - iter.find_map(|(state_root, slot)| { - if slot == new_finalized_slot { - Some(state_root) - } else { - None - } - }) - }, - )? - .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; - - self.after_finalization(&head.beacon_state, new_finalized_state_root)?; - } - - // Register a server-sent event if necessary - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_head_subscribers() { - match (dependent_root, prev_dependent_root) { - (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { - event_handler.register(EventKind::Head(SseHead { - slot: head_slot, - block: beacon_block_root, - state: state_root, - current_duty_dependent_root, - previous_duty_dependent_root, - epoch_transition: is_epoch_transition, - })); - } - (Err(e), _) | (_, Err(e)) => { - warn!( - self.log, - "Unable to find dependent roots, cannot register head event"; - "error" => ?e - ); - } - } - } - - if is_reorg && event_handler.has_reorg_subscribers() { - event_handler.register(EventKind::ChainReorg(SseChainReorg { - slot: head_slot, - depth: reorg_distance.as_u64(), - old_head_block: current_head.block_root, - old_head_state: current_head.state_root, - new_head_block: beacon_block_root, - new_head_state: state_root, - epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), - })); - } - - if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { - let peer_info = self - .block_times_cache - .read() - .get_peer_info(beacon_block_root); - let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - event_handler.register(EventKind::LateHead(SseLateHead { - slot: head_slot, - block: beacon_block_root, - peer_id: peer_info.id, - peer_client: peer_info.client, - proposer_index: head_proposer_index, - proposer_graffiti, - block_delay: block_delay_total, - observed_delay: block_delays.observed, - imported_delay: block_delays.imported, - set_as_head_delay: block_delays.set_as_head, - })); - } - } - - // Update the execution layer. - // Always use the wall-clock slot to update the execution engine rather than the `slot` - // passed in. - if let Err(e) = self.update_execution_engine_forkchoice_blocking(self.slot()?) { - crit!( - self.log, - "Failed to update execution head"; - "error" => ?e - ); - } - - // Performing this call immediately after - // `update_execution_engine_forkchoice_blocking` might result in two calls to fork - // choice updated, one *without* payload attributes and then a second *with* - // payload attributes. - // - // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as - // far as I know. - if let Err(e) = self.prepare_beacon_proposer_blocking() { - crit!( - self.log, - "Failed to prepare proposers after fork choice"; - "error" => ?e - ); - } - - Ok(()) - } - - pub fn prepare_beacon_proposer_blocking(self: &Arc) -> Result<(), Error> { - let current_slot = self.slot()?; - - // Avoids raising an error before Bellatrix. - // - // See `Self::prepare_beacon_proposer_async` for more detail. 
- if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.prepare_beacon_proposer_async(current_slot)) - .map_err(Error::PrepareProposerBlockingFailed)? + pub fn block_is_known_to_fork_choice(&self, root: &Hash256) -> bool { + self.canonical_head + .fork_choice_read_lock() + .contains_block(root) } /// Determines the beacon proposer for the next slot. If that proposer is registered in the @@ -3890,7 +3627,7 @@ impl BeaconChain { /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for /// the next slot and the block at the current slot is already known). - pub async fn prepare_beacon_proposer_async( + pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, ) -> Result<(), Error> { @@ -3913,20 +3650,45 @@ impl BeaconChain { return Ok(()); } - let head = self.head_info()?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + // Atomically read some values from the canonical head, whilst avoiding holding the cached + // head `Arc` any longer than necessary. + // + // Use a blocking task since blocking the core executor on the canonical head read lock can + // block the core tokio executor. + let chain = self.clone(); + let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) = + self.spawn_blocking_handle( + move || { + let cached_head = chain.canonical_head.cached_head(); + let head_block_root = cached_head.head_block_root(); + let decision_root = cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root)?; + Ok::<_, Error>(( + cached_head.head_slot(), + head_block_root, + decision_root, + cached_head.head_random()?, + cached_head.forkchoice_update_parameters(), + )) + }, + "prepare_beacon_proposer_fork_choice_read", + ) + .await??; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); // Don't bother with proposer prep if the head is more than // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. // // This prevents the routine from running during sync. - if head.slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS + if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS < current_slot { debug!( self.log, "Head too old for proposer prep"; - "head_slot" => head.slot, + "head_slot" => head_slot, "current_slot" => current_slot, ); return Ok(()); @@ -3935,9 +3697,9 @@ impl BeaconChain { // Ensure that the shuffling decision root is correct relative to the epoch we wish to // query. let shuffling_decision_root = if head_epoch == prepare_epoch { - head.proposer_shuffling_decision_root + head_decision_root } else { - head.block_root + head_root }; // Read the proposer from the proposer cache. @@ -3967,7 +3729,7 @@ impl BeaconChain { return Ok(()); } - let (proposers, decision_root, fork) = + let (proposers, decision_root, _, fork) = compute_proposer_duties_from_head(prepare_epoch, self)?; let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize); @@ -4013,7 +3775,7 @@ impl BeaconChain { .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), - prev_randao: head.random, + prev_randao: head_random, suggested_fee_recipient: execution_layer .get_suggested_fee_recipient(proposer as u64) .await, @@ -4023,18 +3785,13 @@ impl BeaconChain { self.log, "Preparing beacon proposer"; "payload_attributes" => ?payload_attributes, - "head_root" => ?head.block_root, + "head_root" => ?head_root, "prepare_slot" => prepare_slot, "validator" => proposer, ); let already_known = execution_layer - .insert_proposer( - prepare_slot, - head.block_root, - proposer as u64, - payload_attributes, - ) + .insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes) .await; // Only push a log to the user if this is the first time we've seen this proposer for this // slot. @@ -4076,7 +3833,7 @@ impl BeaconChain { // known). if till_prepare_slot <= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR - || head.slot + 1 >= prepare_slot + || head_slot + 1 >= prepare_slot { debug!( self.log, @@ -4085,37 +3842,17 @@ impl BeaconChain { "prepare_slot" => prepare_slot ); - self.update_execution_engine_forkchoice_async(current_slot) + self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params) .await?; } Ok(()) } - pub fn update_execution_engine_forkchoice_blocking( - self: &Arc, - current_slot: Slot, - ) -> Result<(), Error> { - // Avoids raising an error before Bellatrix. - // - // See `Self::update_execution_engine_forkchoice_async` for more detail. - if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.update_execution_engine_forkchoice_async(current_slot)) - .map_err(Error::ForkchoiceUpdate)? - } - - pub async fn update_execution_engine_forkchoice_async( + pub async fn update_execution_engine_forkchoice( self: &Arc, current_slot: Slot, + params: ForkchoiceUpdateParameters, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4153,73 +3890,56 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - // Deadlock warning: - // - // We are taking the `self.fork_choice` lock whilst holding the `forkchoice_lock`. This - // is intentional, since it allows us to ensure a consistent ordering of messages to the - // execution layer. - let forkchoice_update_parameters = - self.fork_choice.read().get_forkchoice_update_parameters(); - let (head_block_root, head_hash, finalized_hash) = if let Some(params) = - forkchoice_update_parameters + let (head_block_root, head_hash, finalized_hash) = if let Some(head_hash) = params.head_hash { - if let Some(head_hash) = params.head_hash { - ( - params.head_root, - head_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. 
- ForkName::Base | ForkName::Altair => return Ok(()), - _ => { - // We are post-bellatrix - if execution_layer - .payload_attributes(next_slot, params.head_root) + ( + params.head_root, + head_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. + match self.spec.fork_name_at_slot::(next_slot) { + // We are pre-bellatrix; no need to update the EL. + ForkName::Base | ForkName::Altair => return Ok(()), + _ => { + // We are post-bellatrix + if execution_layer + .payload_attributes(next_slot, params.head_root) + .await + .is_some() + { + // We are a proposer, check for terminal_pow_block_hash + if let Some(terminal_pow_block_hash) = execution_layer + .get_terminal_pow_block_hash(&self.spec) .await - .is_some() + .map_err(Error::ForkchoiceUpdate)? { - // We are a proposer, check for terminal_pow_block_hash - if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec) - .await - .map_err(Error::ForkchoiceUpdate)? - { - info!( - self.log, - "Prepared POS transition block proposer"; "slot" => next_slot - ); - ( - params.head_root, - terminal_pow_block_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // TTD hasn't been reached yet, no need to update the EL. - return Ok(()); - } + info!( + self.log, + "Prepared POS transition block proposer"; "slot" => next_slot + ); + ( + params.head_root, + terminal_pow_block_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) } else { - // We are not a proposer, no need to update the EL. + // TTD hasn't been reached yet, no need to update the EL. return Ok(()); } + } else { + // We are not a proposer, no need to update the EL. + return Ok(()); } } } - } else { - warn!( - self.log, - "Missing forkchoice params"; - "msg" => "please report this non-critical bug" - ); - return Ok(()); }; let forkchoice_updated_response = execution_layer @@ -4235,11 +3955,19 @@ impl BeaconChain { Ok(status) => match status { PayloadStatus::Valid => { // Ensure that fork choice knows that the block is no longer optimistic. - if let Err(e) = self - .fork_choice - .write() - .on_valid_execution_payload(head_block_root) - { + let chain = self.clone(); + let fork_choice_update_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_valid_execution_payload(head_block_root) + }, + "update_execution_engine_valid_payload", + ) + .await?; + if let Err(e) = fork_choice_update_result { error!( self.log, "Failed to validate payload"; @@ -4275,24 +4003,14 @@ impl BeaconChain { ); // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. - let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: latest_valid_hash, - }, - ) - }, - "process_invalid_execution_payload_many", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? 
- .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4308,22 +4026,10 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). - let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - ) - }, - "process_invalid_execution_payload_single", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? - .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4333,30 +4039,85 @@ impl BeaconChain { } /// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`. - fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { + pub fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| { slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix }) } - /// Returns the status of the current head block, regarding the validity of the execution - /// payload. - pub fn head_safety_status(&self) -> Result { - let head = self.head_info()?; - let head_block = self - .fork_choice - .read() - .get_block(&head.block_root) - .ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?; - - let status = match head_block.execution_status { - ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)), - ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash), - ExecutionStatus::Optimistic(block_hash) => HeadSafetyStatus::Unsafe(block_hash), - ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None), - }; + /// Returns the value of `execution_optimistic` for `block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + pub fn is_optimistic_block( + &self, + block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block(&block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } - Ok(status) + /// Returns the value of `execution_optimistic` for `head_block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + /// + /// This function will return an error if `head_block` is not present in the fork choice store + /// and so should only be used on the head block or when the block *should* be present in the + /// fork choice store. + /// + /// There is a potential race condition when syncing where the block_root of `head_block` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_head_block( + &self, + head_block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. 
+ if self.slot_is_prior_to_bellatrix(head_block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(&head_block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } + + /// Returns the value of `execution_optimistic` for the current head block. + /// You can optionally provide `head_info` if it was computed previously. + /// + /// Returns `Ok(false)` if the head block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic`. + /// + /// There is a potential race condition when syncing where the block root of `head_info` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_head(&self) -> Result { + self.canonical_head + .head_execution_status() + .map(|status| status.is_optimistic()) + } + + pub fn is_optimistic_block_root( + &self, + block_slot: Slot, + block_root: &Hash256, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(block_slot) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_block_no_fallback(block_root) + .map_err(BeaconChainError::ForkChoiceError) + } } /// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`. @@ -4418,7 +4179,7 @@ impl BeaconChain { /// Note: this function **MUST** be called from a non-async context since /// it contains a call to `fork_choice` which may eventually call /// `tokio::runtime::block_on` in certain cases. - pub fn per_slot_task(self: &Arc) { + pub async fn per_slot_task(self: &Arc) { trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { // Always run the light-weight pruning tasks (these structures should be empty during @@ -4427,14 +4188,12 @@ impl BeaconChain { self.block_times_cache.write().prune(slot); // Don't run heavy-weight tasks during sync. - if self.best_slot().map_or(true, |head_slot| { - head_slot + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot - }) { + if self.best_slot() + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot { return; } // Run fork choice and signal to any waiting task that it has completed. - if let Err(e) = self.fork_choice() { + if let Err(e) = self.recompute_head_at_current_slot().await { error!( self.log, "Fork choice error at slot start"; @@ -4445,78 +4204,26 @@ impl BeaconChain { // Send the notification regardless of fork choice success, this is a "best effort" // notification and we don't want block production to hit the timeout in case of error. - if let Some(tx) = &self.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(slot) { - warn!( - self.log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => slot, - ); - } - } - } - } - - /// Called after `self` has had a new block finalized. - /// - /// Performs pruning and finality-based optimizations. 
- fn after_finalization( - &self, - head_state: &BeaconState, - new_finalized_state_root: Hash256, - ) -> Result<(), Error> { - self.fork_choice.write().prune()?; - let new_finalized_checkpoint = head_state.finalized_checkpoint(); - - self.observed_block_producers.write().prune( - new_finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - ); - - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_finalized_checkpoint.epoch); - debug!( - self.log, - "Snapshot cache pruned"; - "new_len" => snapshot_cache.len(), - "remaining_roots" => ?snapshot_cache.beacon_block_roots(), - ); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); - - self.op_pool.prune_all(head_state, self.epoch()?); - - self.store_migrator.process_finalization( - new_finalized_state_root.into(), - new_finalized_checkpoint, - self.head_tracker.clone(), - )?; - - self.attester_cache - .prune_below(new_finalized_checkpoint.epoch); - - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_finalized_subscribers() { - event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - epoch: new_finalized_checkpoint.epoch, - block: new_finalized_checkpoint.root, - state: new_finalized_state_root, - })); - } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + let chain = self.clone(); + self.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(slot) { + warn!( + chain.log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => slot, + ); + } + } + }, + "per_slot_task_fc_signal_tx", + ); } - - Ok(()) } /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head @@ -4557,8 +4264,8 @@ impl BeaconChain { F: Fn(&CommitteeCache, Hash256) -> Result, { let head_block = self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head_block_root) .ok_or(Error::MissingBeaconBlock(head_block_root))?; @@ -4703,10 +4410,13 @@ impl BeaconChain { ) -> Result>>, Error> { let mut dump = vec![]; - let mut last_slot = BeaconSnapshot { - beacon_block: self.head()?.beacon_block.into(), - beacon_block_root: self.head()?.beacon_block_root, - beacon_state: self.head()?.beacon_state, + let mut last_slot = { + let head = self.canonical_head.cached_head(); + BeaconSnapshot { + beacon_block: Arc::new(head.snapshot.beacon_block.clone_as_blinded()), + beacon_block_root: head.snapshot.beacon_block_root, + beacon_state: head.snapshot.beacon_state.clone(), + } }; dump.push(last_slot.clone()); @@ -4733,7 +4443,7 @@ impl BeaconChain { })?; let slot = BeaconSnapshot { - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_block_root, beacon_state, }; @@ -4771,12 +4481,7 @@ impl BeaconChain { } pub fn dump_as_dot(&self, output: &mut W) { - let canonical_head_hash = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout) - .unwrap() - .beacon_block_root; + let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet = HashSet::new(); let mut finalized_blocks: HashSet = HashSet::new(); let mut justified_blocks: HashSet = 
HashSet::new();
diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
index dc80fb7008f..c7663c77c44 100644
--- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
+++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
@@ -13,8 +13,8 @@ use std::sync::Arc;
 use store::{Error as StoreError, HotColdDB, ItemStore};
 use superstruct::superstruct;
 use types::{
-    BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, Hash256,
-    Slot,
+    BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload,
+    Hash256, Slot,
 };
 
 #[derive(Debug)]
@@ -257,7 +257,7 @@ where
 
     fn on_verified_block<Payload: ExecPayload<E>>(
         &mut self,
-        _block: &BeaconBlock<E, Payload>,
+        _block: BeaconBlockRef<E, Payload>,
         block_root: Hash256,
         state: &BeaconState<E>,
     ) -> Result<(), Self::Error> {
diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
index d645201a580..e76a5a80588 100644
--- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
+++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
@@ -9,12 +9,14 @@
 //! values it stores are very small, so this should not be an issue.
 
 use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use fork_choice::ExecutionStatus;
 use lru::LruCache;
 use smallvec::SmallVec;
 use state_processing::state_advance::partial_state_advance;
 use std::cmp::Ordering;
 use types::{
-    BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned,
+    BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot,
+    Unsigned,
 };
 
 /// The number of sets of proposer indices that should be cached.
@@ -135,11 +137,26 @@ impl BeaconProposerCache {
 pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
     current_epoch: Epoch,
     chain: &BeaconChain<T>,
-) -> Result<(Vec<usize>, Hash256, Fork), BeaconChainError> {
-    // Take a copy of the head of the chain.
-    let head = chain.head()?;
-    let mut state = head.beacon_state;
-    let head_state_root = head.beacon_block.state_root();
+) -> Result<(Vec<usize>, Hash256, ExecutionStatus, Fork), BeaconChainError> {
+    // Atomically collect information about the head whilst holding the canonical head `Arc` for
+    // as short a time as possible.
+    let (mut state, head_state_root, head_block_root) = {
+        let head = chain.canonical_head.cached_head();
+        // Take a copy of the head state.
+        let head_state = head
+            .snapshot
+            .beacon_state
+            .clone_with(CloneConfig::committee_caches_only());
+        let head_state_root = head.head_state_root();
+        let head_block_root = head.head_block_root();
+        (head_state, head_state_root, head_block_root)
+    };
+
+    let execution_status = chain
+        .canonical_head
+        .fork_choice_read_lock()
+        .get_block_execution_status(&head_block_root)
+        .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?;
 
     // Advance the state into the requested epoch.
     ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?;
@@ -153,7 +170,7 @@ pub fn compute_proposer_duties_from_head(
         .proposer_shuffling_decision_root(chain.genesis_block_root)
         .map_err(BeaconChainError::from)?;
 
-    Ok((indices, dependent_root, execution_status, state.fork()))
+    Ok((indices, dependent_root, execution_status, state.fork()))
 }
 
 /// If required, advance `state` to `target_epoch`.
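The `compute_proposer_duties_from_head` rewrite above captures a pattern this series uses throughout: hold the shared head only long enough to take a cheap copy (here, a state clone with committee caches only), then run the expensive epoch advance on the copy. A minimal sketch of that pattern follows; it is not Lighthouse code, and `HeadState`, `advance_to_slot` and the use of `std::sync::RwLock` (rather than `parking_lot`) are illustrative stand-ins:

```rust
use std::sync::{Arc, RwLock};

/// Hypothetical stand-in for the head `BeaconState`.
#[derive(Clone)]
struct HeadState {
    slot: u64,
}

/// Stand-in for `ensure_state_is_in_epoch`: potentially many slots of processing.
fn advance_to_slot(state: &mut HeadState, target_slot: u64) {
    while state.slot < target_slot {
        state.slot += 1; // per-slot processing would happen here
    }
}

/// Computes duties for a future slot without holding the head lock during the advance.
fn duties_for_slot(head: &Arc<RwLock<HeadState>>, target_slot: u64) -> HeadState {
    // Hold the shared lock only long enough to take a cheap copy...
    let mut state = {
        let guard = head.read().unwrap();
        guard.clone()
    };
    // ...then run the expensive advance without blocking other readers.
    advance_to_slot(&mut state, target_slot);
    state
}

fn main() {
    let head = Arc::new(RwLock::new(HeadState { slot: 32 }));
    assert_eq!(duties_for_slot(&head, 64).slot, 64);
}
```

In the real function the block's `ExecutionStatus` is also read from fork choice under its own short-lived read lock, so no lock is held across `ensure_state_is_in_epoch`.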
diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 94adb479c84..8491622cb09 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,4 +1,5 @@ use serde_derive::Serialize; +use std::sync::Arc; use types::{ beacon_state::CloneConfig, BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, SignedBeaconBlock, @@ -8,7 +9,7 @@ use types::{ /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot = FullPayload> { - pub beacon_block: SignedBeaconBlock, + pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -16,7 +17,7 @@ pub struct BeaconSnapshot = FullPayload> impl> BeaconSnapshot { /// Create a new checkpoint. pub fn new( - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { @@ -39,7 +40,7 @@ impl> BeaconSnapshot { /// Update all fields of the checkpoint. pub fn update( &mut self, - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a6cd98c253c..a64fb387e31 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -31,24 +31,27 @@ //! |--------------- //! | //! â–¼ -//! SignatureVerifiedBlock +//! SignatureVerifiedBlock //! | //! â–¼ -//! FullyVerifiedBlock +//! ExecutionPendingBlock +//! | +//! await //! | //! â–¼ //! END //! //! ``` use crate::execution_payload::{ - notify_new_payload, validate_execution_payload_for_gossip, validate_merge_block, + is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, + PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, @@ -56,11 +59,11 @@ use crate::{ use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; +use fork_choice::PayloadVerificationStatus; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::per_block_processing::is_merge_transition_block; @@ -75,16 +78,16 @@ use std::fs; use std::io::Write; use std::sync::Arc; use std::time::Duration; -use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use task_executor::JoinHandle; use tree_hash::TreeHash; -use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -const 
POS_PANDA_BANNER: &str = r#" +pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, ;" ^; ;' ", ;" ^; ;' ", ; s$$$$$$$s ; ; s$$$$$$$s ; @@ -129,7 +132,7 @@ pub enum BlockError { /// /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. - ParentUnknown(Box>), + ParentUnknown(Arc>), /// The block skips too many slots and is a DoS risk. TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. @@ -419,6 +422,12 @@ impl From for BlockError { } } +/// Stores information about verifying a payload against an execution engine. +pub struct PayloadVerificationOutcome { + pub payload_verification_status: PayloadVerificationStatus, + pub is_valid_merge_transition_block: bool, +} + /// Information about invalid blocks which might still be slashable despite being invalid. #[allow(clippy::enum_variant_names)] pub enum BlockSlashInfo { @@ -474,7 +483,7 @@ fn process_block_slash_info( /// Verify all signatures (except deposit signatures) on all blocks in the `chain_segment`. If all /// signatures are valid, the `chain_segment` is mapped to a `Vec` that can -/// later be transformed into a `FullyVerifiedBlock` without re-checking the signatures. If any +/// later be transformed into a `ExecutionPendingBlock` without re-checking the signatures. If any /// signature in the block is invalid, an `Err` is returned (it is not possible to known _which_ /// signature was invalid). /// @@ -483,7 +492,7 @@ fn process_block_slash_info( /// The given `chain_segment` must span no more than two epochs, otherwise an error will be /// returned. pub fn signature_verify_chain_segment( - mut chain_segment: Vec<(Hash256, SignedBeaconBlock)>, + mut chain_segment: Vec<(Hash256, Arc>)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -541,7 +550,7 @@ pub fn signature_verify_chain_segment( #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct GossipVerifiedBlock { - pub block: SignedBeaconBlock, + pub block: Arc>, pub block_root: Hash256, parent: Option>, } @@ -549,11 +558,15 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: Option>, } +/// Used to await the result of executing payload with a remote EE. +type PayloadVerificationHandle = + JoinHandle>>>; + /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and /// ready to import into the `BeaconChain`. The validation includes: /// @@ -562,42 +575,42 @@ pub struct SignatureVerifiedBlock { /// - State root check /// - Per block processing /// -/// Note: a `FullyVerifiedBlock` is not _forever_ valid to be imported, it may later become invalid -/// due to finality or some other event. A `FullyVerifiedBlock` should be imported into the +/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid +/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. 
-pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { - pub block: SignedBeaconBlock, +pub struct ExecutionPendingBlock { + pub block: Arc>, pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, - pub confirmation_db_batch: Vec>, - pub payload_verification_status: PayloadVerificationStatus, + pub confirmed_state_roots: Vec, + pub payload_verification_handle: PayloadVerificationHandle, } -/// Implemented on types that can be converted into a `FullyVerifiedBlock`. +/// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. -pub trait IntoFullyVerifiedBlock: Sized { - fn into_fully_verified_block( +pub trait IntoExecutionPendingBlock: Sized { + fn into_execution_pending_block( self, chain: &Arc>, - ) -> Result, BlockError> { - self.into_fully_verified_block_slashable(chain) - .map(|fully_verified| { + ) -> Result, BlockError> { + self.into_execution_pending_block_slashable(chain) + .map(|execution_pending| { // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_block_header(fully_verified.block.signed_block_header()); + slasher.accept_block_header(execution_pending.block.signed_block_header()); } - fully_verified + execution_pending }) .map_err(|slash_info| process_block_slash_info(chain, slash_info)) } /// Convert the block to fully-verified form while producing data to aid checking slashability. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>>; + ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; } @@ -608,7 +621,7 @@ impl GossipVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // If the block is valid for gossip we don't supply it to the slasher here because @@ -623,7 +636,7 @@ impl GossipVerifiedBlock { /// As for new, but doesn't pass the block to the slasher. fn new_without_slasher_checks( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -658,7 +671,11 @@ impl GossipVerifiedBlock { // reboot if the `observed_block_producers` cache is empty. In that case, without this // check, we will load the parent and state from disk only to find out later that we // already know this block. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -678,10 +695,10 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - let block = check_block_is_finalized_descendant::( - block, - &chain.fork_choice.read(), - &chain.store, + check_block_is_finalized_descendant( + chain, + &chain.canonical_head.fork_choice_write_lock(), + &block, )?; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -827,15 +844,15 @@ impl GossipVerifiedBlock { } } -impl IntoFullyVerifiedBlock for GossipVerifiedBlock { +impl IntoExecutionPendingBlock for GossipVerifiedBlock { /// Completes verification of the wrapped `block`. 
- fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { - let fully_verified = + ) -> Result, BlockSlashInfo>> { + let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - fully_verified.into_fully_verified_block_slashable(chain) + execution_pending.into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -849,7 +866,7 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result> { @@ -892,7 +909,7 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { @@ -947,12 +964,12 @@ impl SignatureVerifiedBlock { } } -impl IntoFullyVerifiedBlock for SignatureVerifiedBlock { +impl IntoExecutionPendingBlock for SignatureVerifiedBlock { /// Completes verification of the wrapped `block`. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) @@ -961,7 +978,7 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignedBeaconBlock { +impl IntoExecutionPendingBlock for Arc> { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` - /// and then using that implementation of `IntoFullyVerifiedBlock` to complete verification. - fn into_fully_verified_block_slashable( + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, None, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? - .into_fully_verified_block_slashable(chain) + .into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -995,7 +1012,7 @@ impl IntoFullyVerifiedBlock for SignedBeaconBlock FullyVerifiedBlock<'a, T> { +impl ExecutionPendingBlock { /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See /// the struct-level documentation for more information. /// @@ -1004,12 +1021,16 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: PreProcessingSnapshot, chain: &Arc>, ) -> Result> { - if let Some(parent) = chain.fork_choice.read().get_block(&block.parent_root()) { + if let Some(parent) = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block.parent_root()) + { // Reject any block where the parent has an invalid payload. It's impossible for a valid // block to descend from an invalid parent. 
if parent.execution_status.is_invalid() { @@ -1028,7 +1049,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } // Reject any block that exceeds our limit on skipped slots. @@ -1048,7 +1069,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // Stage a batch of operations to be completed atomically if this block is imported // successfully. - let mut confirmation_db_batch = vec![]; + let mut confirmed_state_roots = vec![]; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { @@ -1121,7 +1142,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { chain.store.do_atomically(state_batch)?; drop(txn_lock); - confirmation_db_batch.push(StoreOp::DeleteStateTemporaryFlag(state_root)); + confirmed_state_roots.push(state_root); state_root }; @@ -1140,59 +1161,82 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } } - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - let valid_merge_transition_block = - if is_merge_transition_block(&state, block.message().body()) { - validate_merge_block(chain, block.message())?; - true - } else { - false + let block_slot = block.slot(); + let state_current_epoch = state.current_epoch(); + + // Define a future that will verify the execution payload with an execution engine (but + // don't execute it yet). + let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; + let is_valid_merge_transition_block = + is_merge_transition_block(&state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message()).await?; }; - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = notify_new_payload(chain, &state, block.message())?; - - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. 
- if payload_verification_status.is_optimistic() { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; - - if !chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? - { - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + // + // It is important that this function is called *after* `per_slot_processing`, since the + // `randao` may change. + let payload_verification_status = payload_notifier.notify_new_payload().await?; + + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.execution_payload.block_hash); + + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } } - } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + if block_slot.epoch(T::EthSpec::slots_per_epoch()) + VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 >= epoch { @@ -1201,7 +1245,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // the `validator_monitor` lock from being bounced or held for a long time whilst // performing `per_slot_processing`. 
for (i, summary) in summaries.iter().enumerate() { - let epoch = state.current_epoch() - Epoch::from(summaries.len() - i); + let epoch = state_current_epoch - Epoch::from(summaries.len() - i); if let Err(e) = validator_monitor.process_validator_statuses(epoch, summary, &chain.spec) { @@ -1300,21 +1344,13 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { }); } - if valid_merge_transition_block { - info!(chain.log, "{}", POS_PANDA_BANNER); - info!(chain.log, "Proof of Stake Activated"; "slot" => block.slot()); - info!(chain.log, ""; "Terminal POW Block Hash" => ?block.message().execution_payload()?.parent_hash().into_root()); - info!(chain.log, ""; "Merge Transition Block Root" => ?block.message().tree_hash_root()); - info!(chain.log, ""; "Merge Transition Execution Hash" => ?block.message().execution_payload()?.block_hash().into_root()); - } - Ok(Self { block, block_root, state, parent_block: parent.beacon_block, - confirmation_db_batch, - payload_verification_status, + confirmed_state_roots, + payload_verification_handle, }) } } @@ -1366,8 +1402,9 @@ fn check_block_against_finalized_slot( chain: &BeaconChain, ) -> Result<(), BlockError> { let finalized_slot = chain - .head_info()? - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -1383,13 +1420,17 @@ fn check_block_against_finalized_slot( } /// Returns `Ok(block)` if the block descends from the finalized root. -pub fn check_block_is_finalized_descendant>( - block: SignedBeaconBlock, - fork_choice: &ForkChoice, - store: &HotColdDB, -) -> Result, BlockError> { +/// +/// ## Warning +/// +/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. +pub fn check_block_is_finalized_descendant( + chain: &BeaconChain, + fork_choice: &BeaconForkChoice, + block: &Arc>, +) -> Result<(), BlockError> { if fork_choice.is_descendant_of_finalized(block.parent_root()) { - Ok(block) + Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, // then there are two more cases: @@ -1399,7 +1440,8 @@ pub fn check_block_is_finalized_descendant( // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -1477,16 +1523,16 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( chain: &BeaconChain, - block: SignedBeaconBlock, -) -> Result<(ProtoBlock, SignedBeaconBlock), BlockError> { + block: Arc>, +) -> Result<(ProtoBlock, Arc>), BlockError> { if let Some(proto_block) = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block.message().parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(Box::new(block))) + Err(BlockError::ParentUnknown(block)) } } @@ -1496,12 +1542,12 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result< ( PreProcessingSnapshot, - SignedBeaconBlock, + Arc>, ), BlockError, > { @@ -1518,11 +1564,11 @@ fn load_parent( // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). 
if !chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } let block_delay = chain @@ -1717,18 +1763,12 @@ fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = - chain.with_head::<_, BlockError, _>(|head| { - Ok(( - head.beacon_state.fork(), - head.beacon_state.genesis_validators_root(), - )) - })?; + let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( &proposer_pubkey, - &fork, - genesis_validators_root, + &head_fork, + chain.genesis_validators_root, &chain.spec, ) { Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 87f94161585..cef33ee4f72 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; @@ -245,6 +245,7 @@ where let fork_choice = BeaconChain::>::load_fork_choice( store.clone(), + &self.spec, ) .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? .ok_or("Fork choice not found in store")?; @@ -337,7 +338,7 @@ where Ok(( BeaconSnapshot { beacon_block_root, - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_state, }, self, @@ -352,12 +353,15 @@ where self = updated_builder; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); + let current_slot = None; let fork_choice = ForkChoice::from_anchor( fc_store, genesis.beacon_block_root, &genesis.beacon_block, &genesis.beacon_state, + current_slot, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -455,17 +459,20 @@ where let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, - beacon_block: weak_subj_block, + beacon_block: Arc::new(weak_subj_block), beacon_state: weak_subj_state, }; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot); + let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + current_slot, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -638,17 +645,18 @@ where head_block_root, &head_state, store.clone(), + Some(current_slot), &self.spec, )?; } - let mut canonical_head = BeaconSnapshot { + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, - beacon_block: head_block, + beacon_block: Arc::new(head_block), beacon_state: head_state, }; - canonical_head + head_snapshot .beacon_state .build_all_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; @@ -658,25 +666,17 @@ where // // This is a sanity check to detect database corruption. 
let fc_finalized = fork_choice.finalized_checkpoint(); - let head_finalized = canonical_head.beacon_state.finalized_checkpoint(); - if fc_finalized != head_finalized { - let is_genesis = head_finalized.root.is_zero() - && head_finalized.epoch == fc_finalized.epoch - && fc_finalized.root == genesis_block_root; - let is_wss = store.get_anchor_slot().map_or(false, |anchor_slot| { - fc_finalized.epoch == anchor_slot.epoch(TEthSpec::slots_per_epoch()) - }); - if !is_genesis && !is_wss { - return Err(format!( - "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ + let head_finalized = head_snapshot.beacon_state.finalized_checkpoint(); + if fc_finalized.epoch < head_finalized.epoch { + return Err(format!( + "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ {:?}", - fc_finalized, head_finalized - )); - } + fc_finalized, head_finalized + )); } let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| { - ValidatorPubkeyCache::new(&canonical_head.beacon_state, store.clone()) + ValidatorPubkeyCache::new(&head_snapshot.beacon_state, store.clone()) .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e)) })?; @@ -691,7 +691,7 @@ where if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), - &canonical_head.beacon_state, + &head_snapshot.beacon_state, ); } @@ -725,10 +725,18 @@ where .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; + let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); + let genesis_time = head_snapshot.beacon_state.genesis_time(); + let head_for_snapshot_cache = head_snapshot.clone(); + let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let beacon_chain = BeaconChain { spec: self.spec, config: self.chain_config, store, + task_executor: self + .task_executor + .ok_or("Cannot build without task executor")?, store_migrator, slot_clock, op_pool: self.op_pool.ok_or("Cannot build without op pool")?, @@ -758,18 +766,18 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, - genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(), - canonical_head: TimeoutRwLock::new(canonical_head.clone()), + genesis_validators_root, + genesis_time, + canonical_head, genesis_block_root, genesis_state_root, - fork_choice: RwLock::new(fork_choice), fork_choice_signal_tx, fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, - canonical_head, + head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), beacon_proposer_cache: <_>::default(), @@ -787,9 +795,7 @@ where validator_monitor: RwLock::new(validator_monitor), }; - let head = beacon_chain - .head() - .map_err(|e| format!("Failed to get head: {:?}", e))?; + let head = beacon_chain.head_snapshot(); // Prime the attester cache with the head state. 
 beacon_chain
@@ -992,10 +998,10 @@ mod test {
             .build()
             .expect("should build");
 
-        let head = chain.head().expect("should get head");
+        let head = chain.head_snapshot();
 
-        let state = head.beacon_state;
-        let block = head.beacon_block;
+        let state = &head.beacon_state;
+        let block = &head.beacon_block;
 
         assert_eq!(state.slot(), Slot::new(0), "should start from genesis");
         assert_eq!(
@@ -1014,7 +1020,7 @@ mod test {
                 .get_blinded_block(&Hash256::zero())
                 .expect("should read db")
                 .expect("should find genesis block"),
-            block.clone().into(),
+            block.clone_as_blinded(),
             "should store genesis block under zero hash alias"
         );
         assert_eq!(
diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs
new file mode 100644
index 00000000000..c02ddb82632
--- /dev/null
+++ b/beacon_node/beacon_chain/src/canonical_head.rs
@@ -0,0 +1,1307 @@
+//! This module provides all functionality for finding the canonical head, updating all necessary
+//! components (e.g. caches) and maintaining a cached head block and state.
+//!
+//! For practically all applications, the "canonical head" can be read using
+//! `beacon_chain.canonical_head.cached_head()`.
+//!
+//! The canonical head can be updated using `beacon_chain.recompute_head()`.
+//!
+//! ## Deadlock safety
+//!
+//! This module contains three locks:
+//!
+//! 1. `RwLock<BeaconForkChoice>`: Contains `proto_array` fork choice.
+//! 2. `RwLock<CachedHead>`: Contains a cached block/state from the last run of `proto_array`.
+//! 3. `Mutex<()>`: Is used to prevent concurrent execution of `BeaconChain::recompute_head`.
+//!
+//! This module has to take great care to avoid causing a deadlock with these three locks. Any
+//! developers working in this module should tread carefully and seek a detailed review.
+//!
+//! To encourage safe use of this module, it should **only ever return a read or write lock for the
+//! fork choice lock (lock 1)**. Whilst public functions might indirectly utilise locks (2) and (3),
+//! the fundamental `RwLockWriteGuard` or `RwLockReadGuard` should never be exposed. This prevents
+//! external functions from acquiring these locks in conflicting orders and causing a deadlock.
+//!
+//! ## Design Considerations
+//!
+//! We separate the `BeaconForkChoice` and `CachedHead` into two `RwLocks` because we want to ensure
+//! fast access to the `CachedHead`. If we were to put them both under the same lock, we would need
+//! to take an exclusive write-lock on it in order to run `ForkChoice::get_head`. This can take tens
+//! of milliseconds and would block all downstream functions that want to know simple things like
+//! the head block root. This is unacceptable for fast-responding functions like the networking
+//! stack.
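As a concrete illustration of the deadlock-safety notes above, here is a toy sketch of why `BeaconForkChoice` and `CachedHead` live under separate locks. It is not Lighthouse code: the types are stand-ins and it uses `std::sync` locks instead of `parking_lot`. A slow `get_head`-style recompute holds only the fork choice write lock, so readers of the cached head are never blocked behind it:

```rust
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;

// Toy stand-ins: fork choice is slow to recompute, the cached head is cheap to read.
struct ForkChoice {
    head_root: u64,
}
struct CachedHead {
    head_root: u64,
}

fn main() {
    let fork_choice = Arc::new(RwLock::new(ForkChoice { head_root: 1 }));
    let cached_head = Arc::new(RwLock::new(CachedHead { head_root: 1 }));

    let fc = fork_choice.clone();
    let ch = cached_head.clone();
    let recompute = thread::spawn(move || {
        // The long-running `get_head` equivalent holds only the fork choice write lock.
        let mut fc = fc.write().unwrap();
        thread::sleep(Duration::from_millis(50)); // simulate the expensive head computation
        fc.head_root += 1;
        // Publish the result with only a brief write lock on the cached head.
        ch.write().unwrap().head_root = fc.head_root;
    });

    // Readers of the cached head proceed even while the recompute is running.
    let observed = cached_head.read().unwrap().head_root;
    assert!(observed == 1 || observed == 2);

    recompute.join().unwrap();
    assert_eq!(cached_head.read().unwrap().head_root, 2);
}
```

The brief write lock at the end mirrors how a recompute publishes its result: readers may observe the old head until then, but they never wait for `get_head` itself.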
+
+use crate::persisted_fork_choice::PersistedForkChoice;
+use crate::{
+    beacon_chain::{
+        BeaconForkChoice, BeaconStore, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY,
+    },
+    block_times_cache::BlockTimesCache,
+    events::ServerSentEventHandler,
+    metrics,
+    validator_monitor::{get_slot_delay_ms, timestamp_now},
+    BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot,
+};
+use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead};
+use fork_choice::{ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock};
+use itertools::process_results;
+use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use slog::{crit, debug, error, warn, Logger};
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use std::time::Duration;
+use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem};
+use task_executor::{JoinHandle, ShutdownReason};
+use types::*;
+
+/// Simple wrapper around `RwLock` that uses private visibility to prevent any other modules from
+/// accessing the contained lock without it being explicitly noted in this module.
+pub struct CanonicalHeadRwLock<T>(RwLock<T>);
+
+impl<T> From<RwLock<T>> for CanonicalHeadRwLock<T> {
+    fn from(rw_lock: RwLock<T>) -> Self {
+        Self(rw_lock)
+    }
+}
+
+impl<T> CanonicalHeadRwLock<T> {
+    fn new(item: T) -> Self {
+        Self::from(RwLock::new(item))
+    }
+
+    fn read(&self) -> RwLockReadGuard<T> {
+        self.0.read()
+    }
+
+    fn write(&self) -> RwLockWriteGuard<T> {
+        self.0.write()
+    }
+}
+
+/// Provides a series of cached values from the last time `BeaconChain::recompute_head` was run.
+///
+/// This struct is designed to be cheap-to-clone, any large fields should be wrapped in an `Arc` (or
+/// similar).
+#[derive(Clone)]
+pub struct CachedHead<E: EthSpec> {
+    /// Provides the head block and state from the last time the head was updated.
+    pub snapshot: Arc<BeaconSnapshot<E>>,
+    /// The justified checkpoint as per `self.fork_choice`.
+    ///
+    /// This value may be distinct to the `self.snapshot.beacon_state.justified_checkpoint`.
+    /// This value should be used over the beacon state value in practically all circumstances.
+    justified_checkpoint: Checkpoint,
+    /// The finalized checkpoint as per `self.fork_choice`.
+    ///
+    /// This value may be distinct to the `self.snapshot.beacon_state.finalized_checkpoint`.
+    /// This value should be used over the beacon state value in practically all circumstances.
+    finalized_checkpoint: Checkpoint,
+    /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None`
+    /// before Bellatrix.
+    head_hash: Option<ExecutionBlockHash>,
+    /// The `execution_payload.block_hash` of the finalized block. Set to `None` before Bellatrix.
+    finalized_hash: Option<ExecutionBlockHash>,
+}
+
+impl<E: EthSpec> CachedHead<E> {
+    /// Returns root of the block at the head of the beacon chain.
+    pub fn head_block_root(&self) -> Hash256 {
+        self.snapshot.beacon_block_root
+    }
+
+    /// Returns root of the `BeaconState` at the head of the beacon chain.
+    ///
+    /// ## Note
+    ///
+    /// This `BeaconState` has *not* been advanced to the current slot, it has the same slot as the
+    /// head block.
+    pub fn head_state_root(&self) -> Hash256 {
+        self.snapshot.beacon_state_root()
+    }
+
+    /// Returns slot of the block at the head of the beacon chain.
+    ///
+    /// ## Notes
+    ///
+    /// This is *not* the current slot as per the system clock. Use `BeaconChain::slot` for the
+    /// system clock (aka "wall clock") slot.
+    pub fn head_slot(&self) -> Slot {
+        self.snapshot.beacon_block.slot()
+    }
+
+    /// Returns the `Fork` from the `BeaconState` at the head of the chain.
+    pub fn head_fork(&self) -> Fork {
+        self.snapshot.beacon_state.fork()
+    }
+
+    /// Returns the randao mix for the block at the head of the chain.
+    pub fn head_random(&self) -> Result<Hash256, BeaconStateError> {
+        let state = &self.snapshot.beacon_state;
+        let root = *state.get_randao_mix(state.current_epoch())?;
+        Ok(root)
+    }
+
+    /// Returns the active validator count for the current epoch of the head state.
+    ///
+    /// Should only return `None` if the caches have not been built on the head state (this should
+    /// never happen).
+    pub fn active_validator_count(&self) -> Option<usize> {
+        self.snapshot
+            .beacon_state
+            .get_cached_active_validator_indices(RelativeEpoch::Current)
+            .map(|indices| indices.len())
+            .ok()
+    }
+
+    /// Returns the finalized checkpoint, as determined by fork choice.
+    ///
+    /// ## Note
+    ///
+    /// This is *not* the finalized checkpoint of the `head_snapshot.beacon_state`, rather it is the
+    /// best finalized checkpoint that has been observed by `self.fork_choice`. It is possible that
+    /// the `head_snapshot.beacon_state` finalized value is earlier than the one returned here.
+    pub fn finalized_checkpoint(&self) -> Checkpoint {
+        self.finalized_checkpoint
+    }
+
+    /// Returns the justified checkpoint, as determined by fork choice.
+    ///
+    /// ## Note
+    ///
+    /// This is *not* the "current justified checkpoint" of the `head_snapshot.beacon_state`, rather
+    /// it is the justified checkpoint in the view of `self.fork_choice`. It is possible that the
+    /// `head_snapshot.beacon_state` justified value is different to, but not conflicting with, the
+    /// one returned here.
+    pub fn justified_checkpoint(&self) -> Checkpoint {
+        self.justified_checkpoint
+    }
+
+    /// Returns the cached values of `ForkChoice::forkchoice_update_parameters`.
+    ///
+    /// Useful for supplying to the execution layer.
+    pub fn forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters {
+        ForkchoiceUpdateParameters {
+            head_root: self.snapshot.beacon_block_root,
+            head_hash: self.head_hash,
+            finalized_hash: self.finalized_hash,
+        }
+    }
+}
+
+/// Represents the "canonical head" of the beacon chain.
+///
+/// The `cached_head` is elected by the `fork_choice` algorithm contained in this struct.
+///
+/// There is no guarantee that the state of the `fork_choice` struct will always represent the
+/// `cached_head` (i.e. we may call `fork_choice` *without* updating the cached values), however
+/// there is a guarantee that the `cached_head` represents some past state of `fork_choice` (i.e.
+/// `fork_choice` never lags *behind* the `cached_head`).
+pub struct CanonicalHead<T: BeaconChainTypes> {
+    /// Provides an in-memory representation of the non-finalized block tree and is used to run the
+    /// fork choice algorithm and determine the canonical head.
+    pub fork_choice: CanonicalHeadRwLock<BeaconForkChoice<T>>,
+    /// Provides values cached from a previous execution of `self.fork_choice.get_head`.
+    ///
+    /// Although `self.fork_choice` might be slightly more advanced than this value, it is safe to
+    /// consider that these values represent the "canonical head" of the beacon chain.
+    pub cached_head: CanonicalHeadRwLock<CachedHead<T::EthSpec>>,
+    /// A lock used to prevent concurrent runs of `BeaconChain::recompute_head`.
+    ///
+    /// This lock **should not be made public**, it should only be used inside this module.
+    recompute_head_lock: Mutex<()>,
+}
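Because `CachedHead` keeps its snapshot behind an `Arc`, cloning the whole struct copies a pointer rather than a beacon state, which is what makes the clone-returning accessors below cheap. A standalone illustration (hypothetical `CachedHead` mirror, not the real type):

```rust
use std::sync::Arc;

#[derive(Clone)]
struct CachedHead {
    snapshot: Arc<[u8; 4096]>, // stand-in for a multi-megabyte BeaconSnapshot
}

fn main() {
    let head = CachedHead { snapshot: Arc::new([0u8; 4096]) };
    let cheap_copy = head.clone();
    // Both clones share the same allocation; no state was copied.
    assert!(Arc::ptr_eq(&head.snapshot, &cheap_copy.snapshot));
    assert_eq!(Arc::strong_count(&head.snapshot), 2);
}
```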
+impl<T: BeaconChainTypes> CanonicalHead<T> {
+    /// Instantiate `Self`.
+    pub fn new(
+        fork_choice: BeaconForkChoice<T>,
+        snapshot: Arc<BeaconSnapshot<T::EthSpec>>,
+    ) -> Self {
+        let fork_choice_view = fork_choice.cached_fork_choice_view();
+        let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters();
+        let cached_head = CachedHead {
+            snapshot,
+            justified_checkpoint: fork_choice_view.justified_checkpoint,
+            finalized_checkpoint: fork_choice_view.finalized_checkpoint,
+            head_hash: forkchoice_update_params.head_hash,
+            finalized_hash: forkchoice_update_params.finalized_hash,
+        };
+
+        Self {
+            fork_choice: CanonicalHeadRwLock::new(fork_choice),
+            cached_head: CanonicalHeadRwLock::new(cached_head),
+            recompute_head_lock: Mutex::new(()),
+        }
+    }
+
+    /// Load a persisted version of `BeaconForkChoice` from the `store` and restore `self` to that
+    /// state.
+    ///
+    /// This is useful if some database corruption is expected and we wish to go back to our last
+    /// save-point.
+    pub(crate) fn restore_from_store(
+        &self,
+        // We don't actually need this value, however it's always present when we call this function
+        // and it needs to be dropped to prevent a dead-lock. Requiring it to be passed here is
+        // defensive programming.
+        mut fork_choice_write_lock: RwLockWriteGuard<BeaconForkChoice<T>>,
+        store: &BeaconStore<T>,
+        spec: &ChainSpec,
+    ) -> Result<(), Error> {
+        let fork_choice = <BeaconChain<T>>::load_fork_choice(store.clone(), spec)?
+            .ok_or(Error::MissingPersistedForkChoice)?;
+        let fork_choice_view = fork_choice.cached_fork_choice_view();
+        let beacon_block_root = fork_choice_view.head_block_root;
+        let beacon_block = store
+            .get_full_block(&beacon_block_root)?
+            .ok_or(Error::MissingBeaconBlock(beacon_block_root))?;
+        let beacon_state_root = beacon_block.state_root();
+        let beacon_state = store
+            .get_state(&beacon_state_root, Some(beacon_block.slot()))?
+            .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+
+        let snapshot = BeaconSnapshot {
+            beacon_block_root,
+            beacon_block: Arc::new(beacon_block),
+            beacon_state,
+        };
+
+        let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters();
+        let cached_head = CachedHead {
+            snapshot: Arc::new(snapshot),
+            justified_checkpoint: fork_choice_view.justified_checkpoint,
+            finalized_checkpoint: fork_choice_view.finalized_checkpoint,
+            head_hash: forkchoice_update_params.head_hash,
+            finalized_hash: forkchoice_update_params.finalized_hash,
+        };
+
+        *fork_choice_write_lock = fork_choice;
+        // Avoid interleaving the fork choice and cached head locks.
+        drop(fork_choice_write_lock);
+        *self.cached_head.write() = cached_head;
+
+        Ok(())
+    }
+
+    /// Returns the execution status of the block at the head of the beacon chain.
+    ///
+    /// This will only return `Err` in the scenario where `self.fork_choice` has advanced
+    /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to
+    /// run `BeaconChain::recompute_head` to update the cached values.
+    pub fn head_execution_status(&self) -> Result<ExecutionStatus, Error> {
+        let head_block_root = self.cached_head().head_block_root();
+        self.fork_choice_read_lock()
+            .get_block_execution_status(&head_block_root)
+            .ok_or(Error::HeadMissingFromForkChoice(head_block_root))
+    }
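The `cached_head()` accessor that follows returns a clone rather than a guard. The point of that choice: if guards escaped this module, two callers could take the fork choice and cached head locks in opposite orders and deadlock. A minimal sketch of the safe, clone-returning shape (hypothetical types, assuming `parking_lot`):

```rust
use parking_lot::RwLock;
use std::sync::Arc;

struct ForkChoice { head_root: u64 }
#[derive(Clone)]
struct CachedHead { head_root: u64 }

// Safe pattern: take one lock at a time, clone, and drop the guard before
// anyone can touch another lock. No guard ever escapes this function, so
// callers cannot interleave guards in conflicting orders.
fn cached_head(lock: &RwLock<CachedHead>) -> CachedHead {
    lock.read().clone() // guard lives only for this expression
}

fn main() {
    let fork_choice = Arc::new(RwLock::new(ForkChoice { head_root: 1 }));
    let cached = Arc::new(RwLock::new(CachedHead { head_root: 1 }));

    // A writer can hold the fork choice lock while we read the cached head,
    // because `cached_head` never tries to take both locks at once.
    let _fc = fork_choice.write();
    assert_eq!(cached_head(&cached).head_root, 1);
}
```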
+    /// Returns a clone of `self.cached_head`.
+    ///
+    /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it).
+    /// The `CachedHead` is designed to be fast-to-clone so this is preferred to passing back a
+    /// `RwLockReadGuard`, which may cause deadlock issues (see module-level documentation).
+    ///
+    /// This function is safe to be public since it does not expose any locks.
+    pub fn cached_head(&self) -> CachedHead<T::EthSpec> {
+        self.cached_head_read_lock().clone()
+    }
+
+    /// Access a read-lock for the cached head.
+    ///
+    /// This function is **not safe** to be public. See the module-level documentation for more
+    /// information about protecting from deadlocks.
+    fn cached_head_read_lock(&self) -> RwLockReadGuard<CachedHead<T::EthSpec>> {
+        self.cached_head.read()
+    }
+
+    /// Access a write-lock for the cached head.
+    ///
+    /// This function is **not safe** to be public. See the module-level documentation for more
+    /// information about protecting from deadlocks.
+    fn cached_head_write_lock(&self) -> RwLockWriteGuard<CachedHead<T::EthSpec>> {
+        self.cached_head.write()
+    }
+
+    /// Access a read-lock for fork choice.
+    pub fn fork_choice_read_lock(&self) -> RwLockReadGuard<BeaconForkChoice<T>> {
+        self.fork_choice.read()
+    }
+
+    /// Access a write-lock for fork choice.
+    pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard<BeaconForkChoice<T>> {
+        self.fork_choice.write()
+    }
+}
+
+impl<T: BeaconChainTypes> BeaconChain<T> {
+    /// Contains the "best block"; the head of the canonical `BeaconChain`.
+    ///
+    /// It is important to note that the `snapshot.beacon_state` returned may not match the present slot. It
+    /// is the state as it was when the head block was received, which could be some slots prior to
+    /// now.
+    pub fn head(&self) -> CachedHead<T::EthSpec> {
+        self.canonical_head.cached_head()
+    }
+
+    /// Apply a function to an `Arc`-clone of the canonical head snapshot.
+    ///
+    /// This method is a relic from an old implementation where the canonical head was not behind
+    /// an `Arc` and the canonical head lock had to be held whenever it was read. This method is
+    /// fine to be left here, it just seems a bit weird.
+    pub fn with_head<U, E>(
+        &self,
+        f: impl FnOnce(&BeaconSnapshot<T::EthSpec>) -> Result<U, E>,
+    ) -> Result<U, E>
+    where
+        E: From<Error>,
+    {
+        let head_snapshot = self.head_snapshot();
+        f(&head_snapshot)
+    }
+
+    /// Returns the beacon block root at the head of the canonical chain.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_beacon_block_root(&self) -> Hash256 {
+        self.canonical_head
+            .cached_head_read_lock()
+            .snapshot
+            .beacon_block_root
+    }
+
+    /// Returns the slot of the highest block in the canonical chain.
+    pub fn best_slot(&self) -> Slot {
+        self.canonical_head
+            .cached_head_read_lock()
+            .snapshot
+            .beacon_block
+            .slot()
+    }
+
+    /// Returns an `Arc` of the `BeaconSnapshot` at the head of the canonical chain.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_snapshot(&self) -> Arc<BeaconSnapshot<T::EthSpec>> {
+        self.canonical_head.cached_head_read_lock().snapshot.clone()
+    }
+
+    /// Returns the beacon block at the head of the canonical chain.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_beacon_block(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
+        self.canonical_head
+            .cached_head_read_lock()
+            .snapshot
+            .beacon_block
+            .clone()
+    }
+
+    /// Returns a clone of the beacon state at the head of the canonical chain.
+    ///
+    /// Cloning the head state is expensive and should generally be avoided outside of tests.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_beacon_state_cloned(&self) -> BeaconState<T::EthSpec> {
+        // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention.
+        let snapshot: Arc<_> = self.head_snapshot();
+        snapshot
+            .beacon_state
+            .clone_with(CloneConfig::committee_caches_only())
+    }
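`with_head` above threads a caller-supplied closure over a borrowed snapshot, with the closure's error type absorbing the chain's own. A stripped-down standalone mirror of that signature shape (hypothetical `Chain`/`Snapshot`, using `String` in place of the real error type):

```rust
use std::sync::Arc;

struct Snapshot { slot: u64 }

struct Chain { head: Arc<Snapshot> }

impl Chain {
    // Mirrors the shape of `BeaconChain::with_head`: the closure gets a
    // borrowed snapshot and its error type must absorb ours via `From`.
    fn with_head<U, E>(&self, f: impl FnOnce(&Snapshot) -> Result<U, E>) -> Result<U, E>
    where
        E: From<String>,
    {
        let head = self.head.clone(); // Arc-clone; no lock held while `f` runs
        f(&head)
    }
}

fn main() -> Result<(), String> {
    let chain = Chain { head: Arc::new(Snapshot { slot: 42 }) };
    let slot = chain.with_head(|head| Ok::<_, String>(head.slot))?;
    assert_eq!(slot, 42);
    Ok(())
}
```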
+    pub async fn recompute_head_at_current_slot(self: &Arc<Self>) -> Result<(), Error> {
+        let current_slot = self.slot()?;
+        self.recompute_head_at_slot(current_slot).await
+    }
+
+    /// Execute the fork choice algorithm and enthrone the result as the canonical head.
+    ///
+    /// The `current_slot` is specified rather than relying on the wall-clock slot. Using a
+    /// different slot to the wall-clock can be useful for pushing fork choice into the next slot
+    /// *just* before the start of the slot. This ensures that block production can use the correct
+    /// head value without being delayed.
+    pub async fn recompute_head_at_slot(self: &Arc<Self>, current_slot: Slot) -> Result<(), Error> {
+        metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS);
+        let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES);
+
+        let chain = self.clone();
+        match self
+            .spawn_blocking_handle(
+                move || chain.recompute_head_at_slot_internal(current_slot),
+                "recompute_head_internal",
+            )
+            .await?
+        {
+            // Fork choice returned successfully and did not need to update the EL.
+            Ok(None) => Ok(()),
+            // Fork choice returned successfully and needed to update the EL. It has returned a
+            // join-handle from when it spawned some async tasks. We should await those tasks.
+            Ok(Some(join_handle)) => match join_handle.await {
+                // The async task completed successfully.
+                Ok(Some(())) => Ok(()),
+                // The async task did not complete successfully since the runtime is shutting down.
+                Ok(None) => {
+                    debug!(
+                        self.log,
+                        "Did not update EL fork choice";
+                        "info" => "shutting down"
+                    );
+                    Err(Error::RuntimeShutdown)
+                }
+                // The async task did not complete successfully, tokio returned an error.
+                Err(e) => {
+                    error!(
+                        self.log,
+                        "Did not update EL fork choice";
+                        "error" => ?e
+                    );
+                    Err(Error::TokioJoin(e))
+                }
+            },
+            // There was an error recomputing the head.
+            Err(e) => {
+                metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS);
+                Err(e)
+            }
+        }
+    }
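The internal function below takes the fork choice write-lock, runs `get_head`, then downgrades to a read-lock so readers can proceed while no other writer can slip in between. A minimal sketch of that `parking_lot` downgrade pattern:

```rust
use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(0u64);

    let mut write_guard = lock.write();
    *write_guard = 42; // mutate while holding exclusive access

    // Atomically become a reader without ever releasing the lock, so no
    // other writer can sneak in between the mutation and our reads.
    let read_guard = RwLockWriteGuard::downgrade(write_guard);
    assert_eq!(*read_guard, 42);
}
```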
+    /// A non-async (blocking) function which recomputes the canonical head and spawns async tasks.
+    ///
+    /// This function performs long-running, heavy-lifting tasks which should not be performed on
+    /// the core `tokio` executor.
+    fn recompute_head_at_slot_internal(
+        self: &Arc<Self>,
+        current_slot: Slot,
+    ) -> Result<Option<JoinHandle<Option<()>>>, Error> {
+        let recompute_head_lock = self.canonical_head.recompute_head_lock.lock();
+
+        // Take a clone of the current ("old") head.
+        let old_cached_head = self.canonical_head.cached_head();
+
+        // Determine the current ("old") fork choice parameters.
+        //
+        // It is important to read the `fork_choice_view` from the cached head rather than from fork
+        // choice, since the fork choice value might have changed between calls to this function. We
+        // are interested in the changes since we last cached the head values, not since fork choice
+        // was last run.
+        let old_view = ForkChoiceView {
+            head_block_root: old_cached_head.head_block_root(),
+            justified_checkpoint: old_cached_head.justified_checkpoint(),
+            finalized_checkpoint: old_cached_head.finalized_checkpoint(),
+        };
+
+        let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock();
+
+        // Recompute the current head via the fork choice algorithm.
+        fork_choice_write_lock.get_head(current_slot, &self.spec)?;
+
+        // Downgrade the fork choice write-lock to a read lock, without allowing access to any
+        // other writers.
+        let fork_choice_read_lock = RwLockWriteGuard::downgrade(fork_choice_write_lock);
+
+        // Read the current head value from the fork choice algorithm.
+        let new_view = fork_choice_read_lock.cached_fork_choice_view();
+
+        // Check to ensure that the finalized block hasn't been marked as invalid. If it has,
+        // shut down Lighthouse.
+        let finalized_proto_block = fork_choice_read_lock.get_finalized_block()?;
+        check_finalized_payload_validity(self, &finalized_proto_block)?;
+
+        // Sanity check the finalized checkpoint.
+        //
+        // The new finalized checkpoint must be either equal to or better than the previous
+        // finalized checkpoint.
+        check_against_finality_reversion(&old_view, &new_view)?;
+
+        let new_head_proto_block = fork_choice_read_lock
+            .get_block(&new_view.head_block_root)
+            .ok_or(Error::HeadBlockMissingFromForkChoice(
+                new_view.head_block_root,
+            ))?;
+
+        // Do not allow an invalid block to become the head.
+        //
+        // This check avoids the following infinite loop:
+        //
+        // 1. A new block is set as the head.
+        // 2. The EL is updated with the new head, and returns INVALID.
+        // 3. We call `process_invalid_execution_payload` and it calls this function.
+        // 4. This function elects an invalid block as the head.
+        // 5. GOTO 2
+        //
+        // In theory, fork choice should never select an invalid head (i.e., step #4 is impossible).
+        // However, this check is cheap.
+        if new_head_proto_block.execution_status.is_invalid() {
+            return Err(Error::HeadHasInvalidPayload {
+                block_root: new_head_proto_block.root,
+                execution_status: new_head_proto_block.execution_status,
+            });
+        }
+
+        // Exit early if the head or justified/finalized checkpoints have not changed, there's
+        // nothing to do.
+        if new_view == old_view {
+            debug!(
+                self.log,
+                "No change in canonical head";
+                "head" => ?new_view.head_block_root
+            );
+            return Ok(None);
+        }
+
+        // Get the parameters to update the execution layer since either the head or some finality
+        // parameters have changed.
+        let new_forkchoice_update_parameters =
+            fork_choice_read_lock.get_forkchoice_update_parameters();
+
+        perform_debug_logging::<T>(&old_view, &new_view, &fork_choice_read_lock, &self.log);
+
+        // Drop the read lock, it's no longer required and holding it any longer than necessary
+        // will just cause lock contention.
+        drop(fork_choice_read_lock);
+
+        // If the head has changed, update `self.canonical_head`.
+        let new_cached_head = if new_view.head_block_root != old_view.head_block_root {
+            metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD);
+
+            // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling
+            // back to a database read if that fails.
+            let new_snapshot = self
+                .snapshot_cache
+                .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
+                .and_then(|snapshot_cache| {
+                    snapshot_cache.get_cloned(
+                        new_view.head_block_root,
+                        CloneConfig::committee_caches_only(),
+                    )
+                })
+                .map::<Result<_, Error>, _>(Ok)
+                .unwrap_or_else(|| {
+                    let beacon_block = self
+                        .store
+                        .get_full_block(&new_view.head_block_root)?
+                        .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?;
+
+                    let beacon_state_root = beacon_block.state_root();
+                    let beacon_state: BeaconState<T::EthSpec> = self
+                        .get_state(&beacon_state_root, Some(beacon_block.slot()))?
+                        .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+
+                    Ok(BeaconSnapshot {
+                        beacon_block: Arc::new(beacon_block),
+                        beacon_block_root: new_view.head_block_root,
+                        beacon_state,
+                    })
+                })
+                .and_then(|mut snapshot| {
+                    // Regardless of where we got the state from, attempt to build the committee
+                    // caches.
+ snapshot + .beacon_state + .build_all_committee_caches(&self.spec) + .map_err(Into::into) + .map(|()| snapshot) + })?; + + let new_cached_head = CachedHead { + snapshot: Arc::new(new_snapshot), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let new_head = { + // Now the new snapshot has been obtained, take a write-lock on the cached head so + // we can update it quickly. + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + // Enshrine the new head as the canonical cached head. + *cached_head_write_lock = new_cached_head; + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Clear the early attester cache in case it conflicts with `self.canonical_head`. + self.early_attester_cache.clear(); + + new_head + } else { + let new_cached_head = CachedHead { + // The head hasn't changed, take a relatively cheap `Arc`-clone of the existing + // head. + snapshot: old_cached_head.snapshot.clone(), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + + // Enshrine the new head as the canonical cached head. Whilst the head block hasn't + // changed, the FFG checkpoints must have changed. + *cached_head_write_lock = new_cached_head; + + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Alias for readability. + let new_snapshot = &new_cached_head.snapshot; + let old_snapshot = &old_cached_head.snapshot; + + // If the head changed, perform some updates. + if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root { + if let Err(e) = + self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) + { + crit!( + self.log, + "Error updating canonical head"; + "error" => ?e + ); + } + } + + // Drop the old cache head nice and early to try and free the memory as soon as possible. + drop(old_cached_head); + + // If the finalized checkpoint changed, perform some updates. + if new_view.finalized_checkpoint != old_view.finalized_checkpoint { + if let Err(e) = + self.after_finalization(&new_cached_head, new_view, finalized_proto_block) + { + crit!( + self.log, + "Error updating finalization"; + "error" => ?e + ); + } + } + + // The execution layer updates might attempt to take a write-lock on fork choice, so it's + // important to ensure the fork-choice lock isn't being held. + let el_update_handle = + spawn_execution_layer_updates(self.clone(), new_forkchoice_update_parameters)?; + + // We have completed recomputing the head and it's now valid for another process to do the + // same. + drop(recompute_head_lock); + + Ok(Some(el_update_handle)) + } + + /// Perform updates to caches and other components after the canonical head has been changed. 
+    fn after_new_head(
+        self: &Arc<Self>,
+        old_cached_head: &CachedHead<T::EthSpec>,
+        new_cached_head: &CachedHead<T::EthSpec>,
+        new_head_proto_block: ProtoBlock,
+    ) -> Result<(), Error> {
+        let old_snapshot = &old_cached_head.snapshot;
+        let new_snapshot = &new_cached_head.snapshot;
+
+        // Detect and potentially report any re-orgs.
+        let reorg_distance = detect_reorg(
+            &old_snapshot.beacon_state,
+            old_snapshot.beacon_block_root,
+            &new_snapshot.beacon_state,
+            new_snapshot.beacon_block_root,
+            &self.spec,
+            &self.log,
+        );
+
+        // Determine if the new head is in a later epoch than the previous head.
+        let is_epoch_transition = old_snapshot
+            .beacon_block
+            .slot()
+            .epoch(T::EthSpec::slots_per_epoch())
+            < new_snapshot
+                .beacon_state
+                .slot()
+                .epoch(T::EthSpec::slots_per_epoch());
+
+        // These fields are used for server-sent events.
+        let state_root = new_snapshot.beacon_state_root();
+        let head_slot = new_snapshot.beacon_state.slot();
+        let dependent_root = new_snapshot
+            .beacon_state
+            .proposer_shuffling_decision_root(self.genesis_block_root);
+        let prev_dependent_root = new_snapshot
+            .beacon_state
+            .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current);
+
+        // Update the snapshot cache with the latest head value.
+        //
+        // This *could* be done inside `recompute_head`, however updating the head on the snapshot
+        // cache is not critical so we avoid placing it on a critical path. Note that this function
+        // will not return an error if the update fails, it will just log an error.
+        self.snapshot_cache
+            .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
+            .map(|mut snapshot_cache| {
+                snapshot_cache.update_head(new_snapshot.beacon_block_root);
+            })
+            .unwrap_or_else(|| {
+                error!(
+                    self.log,
+                    "Failed to obtain cache write lock";
+                    "lock" => "snapshot_cache",
+                    "task" => "update head"
+                );
+            });
+
+        observe_head_block_delays(
+            &mut self.block_times_cache.write(),
+            &new_head_proto_block,
+            new_snapshot.beacon_block.message().proposer_index(),
+            new_snapshot
+                .beacon_block
+                .message()
+                .body()
+                .graffiti()
+                .as_utf8_lossy(),
+            &self.slot_clock,
+            self.event_handler.as_ref(),
+            &self.log,
+        );
+
+        if is_epoch_transition || reorg_distance.is_some() {
+            self.persist_head_and_fork_choice()?;
+            self.op_pool.prune_attestations(self.epoch()?);
+        }
+
+        // Register server-sent-events for a new head.
+        if let Some(event_handler) = self
+            .event_handler
+            .as_ref()
+            .filter(|handler| handler.has_head_subscribers())
+        {
+            match (dependent_root, prev_dependent_root) {
+                (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => {
+                    event_handler.register(EventKind::Head(SseHead {
+                        slot: head_slot,
+                        block: new_snapshot.beacon_block_root,
+                        state: state_root,
+                        current_duty_dependent_root,
+                        previous_duty_dependent_root,
+                        epoch_transition: is_epoch_transition,
+                    }));
+                }
+                (Err(e), _) | (_, Err(e)) => {
+                    warn!(
+                        self.log,
+                        "Unable to find dependent roots, cannot register head event";
+                        "error" => ?e
+                    );
+                }
+            }
+        }
+
+        // Register a server-sent-event for a reorg (if necessary).
+        if let Some(depth) = reorg_distance {
+            if let Some(event_handler) = self
+                .event_handler
+                .as_ref()
+                .filter(|handler| handler.has_reorg_subscribers())
+            {
+                event_handler.register(EventKind::ChainReorg(SseChainReorg {
+                    slot: head_slot,
+                    depth: depth.as_u64(),
+                    old_head_block: old_snapshot.beacon_block_root,
+                    old_head_state: old_snapshot.beacon_state_root(),
+                    new_head_block: new_snapshot.beacon_block_root,
+                    new_head_state: new_snapshot.beacon_state_root(),
+                    epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()),
+                }));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Perform updates to caches and other components after the finalized checkpoint has been
+    /// changed.
+    fn after_finalization(
+        self: &Arc<Self>,
+        new_cached_head: &CachedHead<T::EthSpec>,
+        new_view: ForkChoiceView,
+        finalized_proto_block: ProtoBlock,
+    ) -> Result<(), Error> {
+        let new_snapshot = &new_cached_head.snapshot;
+
+        self.op_pool
+            .prune_all(&new_snapshot.beacon_state, self.epoch()?);
+
+        self.observed_block_producers.write().prune(
+            new_view
+                .finalized_checkpoint
+                .epoch
+                .start_slot(T::EthSpec::slots_per_epoch()),
+        );
+
+        self.snapshot_cache
+            .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
+            .map(|mut snapshot_cache| {
+                snapshot_cache.prune(new_view.finalized_checkpoint.epoch);
+                debug!(
+                    self.log,
+                    "Snapshot cache pruned";
+                    "new_len" => snapshot_cache.len(),
+                    "remaining_roots" => ?snapshot_cache.beacon_block_roots(),
+                );
+            })
+            .unwrap_or_else(|| {
+                error!(
+                    self.log,
+                    "Failed to obtain cache write lock";
+                    "lock" => "snapshot_cache",
+                    "task" => "prune"
+                );
+            });
+
+        self.attester_cache
+            .prune_below(new_view.finalized_checkpoint.epoch);
+
+        if let Some(event_handler) = self.event_handler.as_ref() {
+            if event_handler.has_finalized_subscribers() {
+                event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint {
+                    epoch: new_view.finalized_checkpoint.epoch,
+                    block: new_view.finalized_checkpoint.root,
+                    // Provide the state root of the latest finalized block, rather than the
+                    // specific state root at the first slot of the finalized epoch (which
+                    // might be a skip slot).
+                    state: finalized_proto_block.state_root,
+                }));
+            }
+        }
+
+        // The store migration task requires the *state at the slot of the finalized epoch*,
+        // rather than the state of the latest finalized block. These two values will only
+        // differ when the first slot of the finalized epoch is a skip slot.
+        //
+        // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot`
+        // to ensure we use the same state that we just set as the head.
+        let new_finalized_slot = new_view
+            .finalized_checkpoint
+            .epoch
+            .start_slot(T::EthSpec::slots_per_epoch());
+        let new_finalized_state_root = process_results(
+            StateRootsIterator::new(&self.store, &new_snapshot.beacon_state),
+            |mut iter| {
+                iter.find_map(|(state_root, slot)| {
+                    if slot == new_finalized_slot {
+                        Some(state_root)
+                    } else {
+                        None
+                    }
+                })
+            },
+        )?
+        .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?;
+
+        self.store_migrator.process_finalization(
+            new_finalized_state_root.into(),
+            new_view.finalized_checkpoint,
+            self.head_tracker.clone(),
+        )?;
+
+        Ok(())
+    }
+
+    /// Return a database operation for writing fork choice to disk.
+    pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp {
+        Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock())
+    }
+
+    /// Return a database operation for writing fork choice to disk.
+    pub fn persist_fork_choice_in_batch_standalone(
+        fork_choice: &BeaconForkChoice<T>,
+    ) -> KeyValueStoreOp {
+        let persisted_fork_choice = PersistedForkChoice {
+            fork_choice: fork_choice.to_persisted(),
+            fork_choice_store: fork_choice.fc_store().to_persisted(),
+        };
+        persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)
+    }
+}
+
+/// Check to see if the `finalized_proto_block` has an invalid execution payload. If so, shut down
+/// Lighthouse.
+///
+/// ## Notes
+///
+/// This function is called whilst holding a write-lock on the `canonical_head`. To ensure dead-lock
+/// safety, **do not take any other locks inside this function**.
+fn check_finalized_payload_validity<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    finalized_proto_block: &ProtoBlock,
+) -> Result<(), Error> {
+    if let ExecutionStatus::Invalid(block_hash) = finalized_proto_block.execution_status {
+        crit!(
+            chain.log,
+            "Finalized block has an invalid payload";
+            "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \
+            You may be on a hostile network.",
+            "block_hash" => ?block_hash
+        );
+        let mut shutdown_sender = chain.shutdown_sender();
+        shutdown_sender
+            .try_send(ShutdownReason::Failure(
+                "Finalized block has an invalid execution payload.",
+            ))
+            .map_err(Error::InvalidFinalizedPayloadShutdownError)?;
+
+        // Exit now, the node is in an invalid state.
+        return Err(Error::InvalidFinalizedPayload {
+            finalized_root: finalized_proto_block.root,
+            execution_block_hash: block_hash,
+        });
+    }
+
+    Ok(())
+}
+
+/// Check to ensure that the transition from `old_view` to `new_view` will not revert finality.
+fn check_against_finality_reversion(
+    old_view: &ForkChoiceView,
+    new_view: &ForkChoiceView,
+) -> Result<(), Error> {
+    let finalization_equal = new_view.finalized_checkpoint == old_view.finalized_checkpoint;
+    let finalization_advanced =
+        new_view.finalized_checkpoint.epoch > old_view.finalized_checkpoint.epoch;
+
+    if finalization_equal || finalization_advanced {
+        Ok(())
+    } else {
+        Err(Error::RevertedFinalizedEpoch {
+            old: old_view.finalized_checkpoint,
+            new: new_view.finalized_checkpoint,
+        })
+    }
+}
+
+fn perform_debug_logging<T: BeaconChainTypes>(
+    old_view: &ForkChoiceView,
+    new_view: &ForkChoiceView,
+    fork_choice: &BeaconForkChoice<T>,
+    log: &Logger,
+) {
+    if new_view.head_block_root != old_view.head_block_root {
+        debug!(
+            log,
+            "Fork choice updated head";
+            "new_head_weight" => ?fork_choice
+                .get_block_weight(&new_view.head_block_root),
+            "new_head" => ?new_view.head_block_root,
+            "old_head_weight" => ?fork_choice
+                .get_block_weight(&old_view.head_block_root),
+            "old_head" => ?old_view.head_block_root,
+        )
+    }
+    if new_view.justified_checkpoint != old_view.justified_checkpoint {
+        debug!(
+            log,
+            "Fork choice justified";
+            "new_root" => ?new_view.justified_checkpoint.root,
+            "new_epoch" => new_view.justified_checkpoint.epoch,
+            "old_root" => ?old_view.justified_checkpoint.root,
+            "old_epoch" => old_view.justified_checkpoint.epoch,
+        )
+    }
+    if new_view.finalized_checkpoint != old_view.finalized_checkpoint {
+        debug!(
+            log,
+            "Fork choice finalized";
+            "new_root" => ?new_view.finalized_checkpoint.root,
+            "new_epoch" => new_view.finalized_checkpoint.epoch,
+            "old_root" => ?old_view.finalized_checkpoint.root,
+            "old_epoch" => old_view.finalized_checkpoint.epoch,
+        )
+    }
+}
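The `check_against_finality_reversion` rule above is small enough to check in isolation: equal checkpoints or an advancing finalized epoch pass, anything else is a reversion. A standalone mirror with simplified hypothetical types (the real code uses `ForkChoiceView` and returns `Error::RevertedFinalizedEpoch`):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct Checkpoint { epoch: u64, root: u8 }

// Equal or advancing finalized checkpoints are fine; anything else is a
// reversion and must be rejected.
fn check(old: Checkpoint, new: Checkpoint) -> Result<(), String> {
    if new == old || new.epoch > old.epoch {
        Ok(())
    } else {
        Err(format!("reverted finality: {:?} -> {:?}", old, new))
    }
}

fn main() {
    let old = Checkpoint { epoch: 10, root: 1 };
    assert!(check(old, Checkpoint { epoch: 10, root: 1 }).is_ok()); // unchanged
    assert!(check(old, Checkpoint { epoch: 11, root: 2 }).is_ok()); // advanced
    assert!(check(old, Checkpoint { epoch: 10, root: 9 }).is_err()); // conflicting root
    assert!(check(old, Checkpoint { epoch: 9, root: 1 }).is_err()); // reverted
}
```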
+fn spawn_execution_layer_updates<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+    forkchoice_update_params: ForkchoiceUpdateParameters,
+) -> Result<JoinHandle<Option<()>>, Error> {
+    let current_slot = chain
+        .slot_clock
+        .now_or_genesis()
+        .ok_or(Error::UnableToReadSlot)?;
+
+    chain
+        .task_executor
+        .clone()
+        .spawn_handle(
+            async move {
+                // Avoids raising an error before Bellatrix.
+                //
+                // See `Self::prepare_beacon_proposer` for more detail.
+                if chain.slot_is_prior_to_bellatrix(current_slot + 1) {
+                    return;
+                }
+
+                if let Err(e) = chain
+                    .update_execution_engine_forkchoice(current_slot, forkchoice_update_params)
+                    .await
+                {
+                    crit!(
+                        chain.log,
+                        "Failed to update execution head";
+                        "error" => ?e
+                    );
+                }
+
+                // Update the mechanism for preparing for block production on the execution layer.
+                //
+                // Performing this call immediately after `update_execution_engine_forkchoice_blocking`
+                // might result in two calls to fork choice updated, one *without* payload attributes and
+                // then a second *with* payload attributes.
+                //
+                // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as far as I
+                // know.
+                if let Err(e) = chain.prepare_beacon_proposer(current_slot).await {
+                    crit!(
+                        chain.log,
+                        "Failed to prepare proposers after fork choice";
+                        "error" => ?e
+                    );
+                }
+            },
+            "update_el_forkchoice",
+        )
+        .ok_or(Error::RuntimeShutdown)
+}
+
+/// Attempt to detect if the new head is not on the same chain as the previous block
+/// (i.e., a re-org).
+///
+/// Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks
+/// between calls to fork choice without swapping between chains. This seems like an
+/// extreme-enough scenario that a warning is fine.
+fn detect_reorg<E: EthSpec>(
+    old_state: &BeaconState<E>,
+    old_block_root: Hash256,
+    new_state: &BeaconState<E>,
+    new_block_root: Hash256,
+    spec: &ChainSpec,
+    log: &Logger,
+) -> Option<Slot> {
+    let is_reorg = new_state
+        .get_block_root(old_state.slot())
+        .map_or(true, |root| *root != old_block_root);
+
+    if is_reorg {
+        let reorg_distance =
+            match find_reorg_slot(old_state, old_block_root, new_state, new_block_root, spec) {
+                Ok(slot) => old_state.slot().saturating_sub(slot),
+                Err(e) => {
+                    warn!(
+                        log,
+                        "Could not find re-org depth";
+                        "error" => format!("{:?}", e),
+                    );
+                    return None;
+                }
+            };
+
+        metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT);
+        metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP);
+        warn!(
+            log,
+            "Beacon chain re-org";
+            "previous_head" => ?old_block_root,
+            "previous_slot" => old_state.slot(),
+            "new_head" => ?new_block_root,
+            "new_slot" => new_state.slot(),
+            "reorg_distance" => reorg_distance,
+        );
+
+        Some(reorg_distance)
+    } else {
+        None
+    }
+}
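`find_reorg_slot` below pairs up two slot-aligned root iterators and returns the first slot where the roots agree. The same search over toy data (standalone; slots and roots are arbitrary `u64`s):

```rust
fn common_ancestor_slot(
    old_roots: &[(u64, u64)], // (slot, block_root), descending by slot
    new_roots: &[(u64, u64)],
) -> Option<u64> {
    for (&(old_slot, old_root), &(new_slot, new_root)) in old_roots.iter().zip(new_roots) {
        // Mirrors the sanity check in `find_reorg_slot`.
        assert_eq!(old_slot, new_slot, "iterators must be slot-aligned");
        if old_root == new_root {
            return Some(old_slot);
        }
    }
    None
}

fn main() {
    // Chains diverge at slots 5 and 4, and share history from slot 3 down.
    let old = [(5, 0xa1), (4, 0xa2), (3, 0xcc), (2, 0xdd)];
    let new = [(5, 0xb1), (4, 0xb2), (3, 0xcc), (2, 0xdd)];
    assert_eq!(common_ancestor_slot(&old, &new), Some(3));
}
```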
aligned_roots_iter { + ($state: ident, $block_root: ident) => { + std::iter::once(Ok(($state.slot(), $block_root))) + .chain($state.rev_iter_block_roots(spec)) + .skip_while(|result| { + result + .as_ref() + .map_or(false, |(slot, _)| *slot > lowest_slot) + }) + }; + } + + // Create iterators across old/new roots where iterators both start at the same slot. + let mut new_roots = aligned_roots_iter!(new_state, new_block_root); + let mut old_roots = aligned_roots_iter!(old_state, old_block_root); + + // Whilst *both* of the iterators are still returning values, try and find a common + // ancestor between them. + while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { + let (old_slot, old_root) = old?; + let (new_slot, new_root) = new?; + + // Sanity check to detect programming errors. + if old_slot != new_slot { + return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); + } + + if old_root == new_root { + // A common ancestor has been found. + return Ok(old_slot); + } + } + + // If no common ancestor is found, declare that the re-org happened at the previous + // finalized slot. + // + // Sometimes this will result in the return slot being *lower* than the actual reorg + // slot. However, assuming we don't re-org through a finalized slot, it will never be + // *higher*. + // + // We provide this potentially-inaccurate-but-safe information to avoid onerous + // database reads during times of deep reorgs. + Ok(old_state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch())) +} + +fn observe_head_block_delays( + block_times_cache: &mut BlockTimesCache, + head_block: &ProtoBlock, + head_block_proposer_index: u64, + head_block_graffiti: String, + slot_clock: &S, + event_handler: Option<&ServerSentEventHandler>, + log: &Logger, +) { + let block_time_set_as_head = timestamp_now(); + let head_block_root = head_block.root; + let head_block_slot = head_block.slot; + + // Calculate the total delay between the start of the slot and when it was set as head. + let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); + + // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to + // the cache during sync. + if block_delay_total < slot_clock.slot_duration() * 64 { + block_times_cache.set_time_set_as_head( + head_block_root, + head_block_slot, + block_time_set_as_head, + ); + } + + // If a block comes in from over 4 slots ago, it is most likely a block from sync. + let block_from_sync = block_delay_total > slot_clock.slot_duration() * 4; + + // Determine whether the block has been set as head too late for proper attestation + // production. + let late_head = block_delay_total >= slot_clock.unagg_attestation_production_delay(); + + // Do not store metrics if the block was > 4 slots old, this helps prevent noise during + // sync. + if !block_from_sync { + // Observe the total block delay. This is the delay between the time the slot started + // and when the block was set as head. + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, + block_delay_total, + ); + + // Observe the delay between when we imported the block and when we set the block as + // head. 
+ let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, + block_delays + .observed + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, + block_delays + .set_as_head + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + // If the block was enshrined as head too late for attestations to be created for it, + // log a debug warning and increment a metric. + if late_head { + metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); + debug!( + log, + "Delayed head block"; + "block_root" => ?head_block_root, + "proposer_index" => head_block_proposer_index, + "slot" => head_block_slot, + "block_delay" => ?block_delay_total, + "observed_delay" => ?block_delays.observed, + "imported_delay" => ?block_delays.imported, + "set_as_head_delay" => ?block_delays.set_as_head, + ); + } + } + + if let Some(event_handler) = event_handler { + if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { + let peer_info = block_times_cache.get_peer_info(head_block_root); + let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + event_handler.register(EventKind::LateHead(SseLateHead { + slot: head_block_slot, + block: head_block_root, + peer_id: peer_info.id, + peer_client: peer_info.client, + proposer_index: head_block_proposer_index, + proposer_graffiti: head_block_graffiti, + block_delay: block_delay_total, + observed_delay: block_delays.observed, + imported_delay: block_delays.imported, + set_as_head_delay: block_delays.set_as_head, + })); + } + } +} diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index f589585f8a6..62b584968fc 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -4,6 +4,7 @@ use crate::{ }; use parking_lot::RwLock; use proto_array::Block as ProtoBlock; +use std::sync::Arc; use types::*; pub struct CacheItem { @@ -18,7 +19,7 @@ pub struct CacheItem { /* * Values used to make the block available. */ - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, } @@ -48,7 +49,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -146,7 +147,7 @@ impl EarlyAttesterCache { } /// Returns the block, if `block_root` matches the cached item. 
- pub fn get_block(&self, block_root: Hash256) -> Option> { + pub fn get_block(&self, block_root: Hash256) -> Option>> { self.item .read() .as_ref() diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 834823992ac..d3337dfafe2 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -45,8 +45,8 @@ pub enum BeaconChainError { UnableToReadSlot, UnableToComputeTimeAtSlot, RevertedFinalizedEpoch { - previous_epoch: Epoch, - new_epoch: Epoch, + old: Checkpoint, + new: Checkpoint, }, SlotClockDidNotStart, NoStateForSlot(Slot), @@ -161,6 +161,7 @@ pub enum BeaconChainError { BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), + HeadBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayload { finalized_root: Hash256, execution_block_hash: ExecutionBlockHash, @@ -184,11 +185,19 @@ pub enum BeaconChainError { beacon_block_root: Hash256, }, RuntimeShutdown, + TokioJoin(tokio::task::JoinError), ProcessInvalidExecutionPayload(JoinError), ForkChoiceSignalOutOfOrder { current: Slot, latest: Slot, }, + ForkchoiceUpdateParamsMissing, + HeadHasInvalidPayload { + block_root: Hash256, + execution_status: ExecutionStatus, + }, + AttestationHeadNotInForkChoice(Hash256), + MissingPersistedForkChoice, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -214,7 +223,6 @@ easy_from_to!(BlockReplayError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { - UnableToGetHeadInfo(BeaconChainError), UnableToGetBlockRootFromState, UnableToReadSlot, UnableToProduceAtSlot(Slot), @@ -239,6 +247,11 @@ pub enum BlockProductionError { MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ForkChoiceError(BeaconChainError), + ShuttingDown, + MissingSyncAggregate, + MissingExecutionPayload, + TokioJoin(tokio::task::JoinError), + BeaconChain(BeaconChainError), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 7085fc6500f..747b8a468d6 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -21,8 +21,59 @@ use state_processing::per_block_processing::{ partially_verify_execution_payload, }; use std::sync::Arc; +use tokio::task::JoinHandle; use types::*; +pub type PreparePayloadResult = Result; +pub type PreparePayloadHandle = JoinHandle>>; + +/// Used to await the result of executing payload with a remote EE. +pub struct PayloadNotifier { + pub chain: Arc>, + pub block: Arc>, + payload_verification_status: Option, +} + +impl PayloadNotifier { + pub fn new( + chain: Arc>, + block: Arc>, + state: &BeaconState, + ) -> Result> { + let payload_verification_status = if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. 
+ partially_verify_execution_payload( + state, + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + }; + + Ok(Self { + chain, + block, + payload_verification_status, + }) + } + + pub async fn notify_new_payload( + self, + ) -> Result> { + if let Some(precomputed_status) = self.payload_verification_status { + Ok(precomputed_status) + } else { + notify_new_payload(&self.chain, self.block.message()).await + } + } +} + /// Verify that `execution_payload` contained by `block` is considered valid by an execution /// engine. /// @@ -32,31 +83,20 @@ use types::*; /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -pub fn notify_new_payload( +async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, - state: &BeaconState, - block: BeaconBlockRef, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result> { - if !is_execution_enabled(state, block.body()) { - return Ok(PayloadVerificationStatus::Irrelevant); - } - let execution_payload = block.execution_payload()?; - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution payload from junk. - partially_verify_execution_payload(state, execution_payload, &chain.spec) - .map_err(BlockError::PerBlockProcessingError)?; - let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let new_payload_response = execution_layer.block_on(|execution_layer| { - execution_layer.notify_new_payload(&execution_payload.execution_payload) - }); + + let new_payload_response = execution_layer + .notify_new_payload(&execution_payload.execution_payload) + .await; match new_payload_response { Ok(status) => match status { @@ -70,13 +110,13 @@ pub fn notify_new_payload( // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. 
let latest_root = block.parent_root(); - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { + chain + .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { head_block_root: latest_root, always_invalidate_head: false, latest_valid_ancestor: latest_valid_hash, - }, - )?; + }) + .await?; Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } @@ -103,9 +143,9 @@ pub fn notify_new_payload( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub fn validate_merge_block( - chain: &BeaconChain, - block: BeaconBlockRef, +pub async fn validate_merge_block<'a, T: BeaconChainTypes>( + chain: &Arc>, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result<(), BlockError> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -137,9 +177,8 @@ pub fn validate_merge_block( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let is_valid_terminal_pow_block = execution_layer - .block_on(|execution_layer| { - execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) - }) + .is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) + .await .map_err(ExecutionPayloadError::from)?; match is_valid_terminal_pow_block { @@ -149,23 +188,7 @@ pub fn validate_merge_block( } .into()), None => { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; - - // Ensure the block is a candidate for optimistic import. - if chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? - { + if is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? { debug!( chain.log, "Optimistically accepting terminal block"; @@ -180,6 +203,36 @@ pub fn validate_merge_block( } } +/// Check to see if a block with the given parameters is valid to be imported optimistically. +pub async fn is_optimistic_candidate_block( + chain: &Arc>, + block_slot: Slot, + block_parent_root: Hash256, +) -> Result { + let current_slot = chain.slot()?; + let inner_chain = chain.clone(); + + // Use a blocking task to check if the block is an optimistic candidate. Interacting + // with the `fork_choice` lock in an async task can block the core executor. + chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_candidate_block( + current_slot, + block_slot, + &block_parent_root, + &inner_chain.spec, + ) + }, + "validate_merge_block_optimistic_candidate", + ) + .await? + .map_err(BeaconChainError::from) +} + /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( @@ -243,35 +296,52 @@ pub fn validate_execution_payload_for_gossip( /// Equivalent to the `get_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub fn get_execution_payload>( - chain: &BeaconChain, - state: &BeaconState, - proposer_index: u64, - pubkey: Option, -) -> Result { - Ok( - prepare_execution_payload_blocking::(chain, state, proposer_index, pubkey)? 
- .unwrap_or_default(), - ) -} - -/// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, +pub fn get_execution_payload< + T: BeaconChainTypes, + Payload: ExecPayload + Default + Send + 'static, +>( + chain: Arc>, state: &BeaconState, + finalized_checkpoint: Checkpoint, proposer_index: u64, pubkey: Option, -) -> Result, BlockProductionError> { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BlockProductionError::ExecutionLayerMissing)?; +) -> Result, BlockProductionError> { + // Compute all required values from the `state` now to avoid needing to pass it into a spawned + // task. + let spec = &chain.spec; + let slot = state.slot(); + let current_epoch = state.current_epoch(); + let is_merge_transition_complete = is_merge_transition_complete(state); + let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(current_epoch)?; + let latest_execution_payload_header_block_hash = + state.latest_execution_payload_header()?.block_hash; + + // Spawn a task to obtain the execution payload from the EL via a series of async calls. The + // `join_handle` can be used to await the result of the function. + let join_handle = chain + .task_executor + .clone() + .spawn_handle( + async move { + prepare_execution_payload::( + &chain, + slot, + is_merge_transition_complete, + timestamp, + random, + finalized_checkpoint, + proposer_index, + pubkey, + latest_execution_payload_header_block_hash, + ) + .await + }, + "get_execution_payload", + ) + .ok_or(BlockProductionError::ShuttingDown)?; - execution_layer - .block_on_generic(|_| async { - prepare_execution_payload::(chain, state, proposer_index, pubkey).await - }) - .map_err(BlockProductionError::BlockingFailed)? + Ok(join_handle) } /// Prepares an execution payload for inclusion in a block. @@ -288,25 +358,38 @@ pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, - state: &BeaconState, +#[allow(clippy::too_many_arguments)] +pub async fn prepare_execution_payload( + chain: &Arc>, + slot: Slot, + is_merge_transition_complete: bool, + timestamp: u64, + random: Hash256, + finalized_checkpoint: Checkpoint, proposer_index: u64, pubkey: Option, -) -> Result, BlockProductionError> { + latest_execution_payload_header_block_hash: ExecutionBlockHash, +) -> Result +where + T: BeaconChainTypes, + Payload: ExecPayload + Default, +{ + let current_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let execution_layer = chain .execution_layer .as_ref() .ok_or(BlockProductionError::ExecutionLayerMissing)?; - let parent_hash = if !is_merge_transition_complete(state) { + let parent_hash = if !is_merge_transition_complete { let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); let is_activation_epoch_reached = - state.current_epoch() >= spec.terminal_block_hash_activation_epoch; + current_epoch >= spec.terminal_block_hash_activation_epoch; if is_terminal_block_hash_set && !is_activation_epoch_reached { - return Ok(None); + // Use the "empty" payload if there's a terminal block hash, but we haven't reached the + // terminal block epoch yet. 
+ return Ok(<_>::default()); } let terminal_pow_block_hash = execution_layer @@ -317,36 +400,55 @@ pub async fn prepare_execution_payload::default()); } } else { - state.latest_execution_payload_header()?.block_hash + latest_execution_payload_header_block_hash }; - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; - let random = *state.get_randao_mix(state.current_epoch())?; - let finalized_root = state.finalized_checkpoint().root; + // Try to obtain the finalized proto block from fork choice. + // + // Use a blocking task to interact with the `fork_choice` lock otherwise we risk blocking the + // core `tokio` executor. + let inner_chain = chain.clone(); + let finalized_proto_block = chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .get_block(&finalized_checkpoint.root) + }, + "prepare_execution_payload_finalized_hash", + ) + .await + .map_err(BlockProductionError::BeaconChain)?; // The finalized block hash is not included in the specification, however we provide this // parameter so that the execution layer can produce a payload id if one is not already known // (e.g., due to a recent reorg). - let finalized_block_hash = - if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { - block.execution_status.block_hash() - } else { - chain - .store - .get_blinded_block(&finalized_root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()) - }; + let finalized_block_hash = if let Some(block) = finalized_proto_block { + block.execution_status.block_hash() + } else { + chain + .store + .get_blinded_block(&finalized_checkpoint.root) + .map_err(BlockProductionError::FailedToReadFinalizedBlock)? + .ok_or(BlockProductionError::MissingFinalizedBlock( + finalized_checkpoint.root, + ))? + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash()) + }; // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. + // + // This future is not executed here, it's up to the caller to await it. let execution_payload = execution_layer .get_payload::( parent_hash, @@ -355,10 +457,10 @@ pub async fn prepare_execution_payload, Cold: It head_block_root: Hash256, head_state: &BeaconState, store: Arc>, + current_slot: Option, spec: &ChainSpec, ) -> Result, E>, String> { // Fetch finalized block. @@ -138,7 +139,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It })?; let finalized_snapshot = BeaconSnapshot { beacon_block_root: finalized_block_root, - beacon_block: finalized_block, + beacon_block: Arc::new(finalized_block), beacon_state: finalized_state, }; @@ -149,6 +150,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It finalized_block_root, &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, + current_slot, + spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; @@ -180,11 +183,10 @@ pub fn reset_fork_choice_to_finalization, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. let payload_verification_status = PayloadVerificationStatus::Optimistic; - let (block, _) = block.deconstruct(); fork_choice .on_block( block.slot(), - &block, + block.message(), block.canonical_root(), // Reward proposer boost. We are reinforcing the canonical chain. 
Duration::from_secs(0), diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1891362ebbd..cc45a6bb9a9 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -7,6 +7,7 @@ use state_processing::{ }; use std::borrow::Cow; use std::iter; +use std::sync::Arc; use std::time::Duration; use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; use types::{Hash256, SignedBlindedBeaconBlock, Slot}; @@ -58,7 +59,7 @@ impl BeaconChain { /// Return the number of blocks successfully imported. pub fn import_historical_block_batch( &self, - blocks: Vec>, + blocks: Vec>>, ) -> Result { let anchor_info = self .store diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 579020b1d1e..b82b690d20c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,6 +9,7 @@ pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; +pub mod canonical_head; pub mod chain_config; mod early_attester_cache; mod errors; @@ -42,8 +43,8 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, HeadInfo, HeadSafetyStatus, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; @@ -52,8 +53,10 @@ pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; +pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; +pub use fork_choice::ExecutionStatus; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 18abbc8c5bf..9cd177b3409 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -51,9 +51,7 @@ async fn proposer_prep_service( executor.spawn( async move { if let Ok(current_slot) = inner_chain.slot() { - if let Err(e) = inner_chain - .prepare_beacon_proposer_async(current_slot) - .await + if let Err(e) = inner_chain.prepare_beacon_proposer(current_slot).await { error!( inner_chain.log, diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 8fb4f82bed1..a48f1d3756e 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -7,6 +7,7 @@ mod types; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; +use crate::types::ChainSpec; use slog::{warn, Logger}; use std::path::Path; use std::sync::Arc; @@ -21,6 +22,7 @@ pub fn migrate_schema( from: SchemaVersion, to: SchemaVersion, log: 
Logger,
+    spec: &ChainSpec,
 ) -> Result<(), StoreError> {
     match (from, to) {
         // Migrating from the current schema version to itself is always OK, a no-op.
@@ -28,8 +30,8 @@ pub fn migrate_schema<T: BeaconChainTypes>(
         // Upgrade across multiple versions by recursively migrating one step at a time.
         (_, _) if from.as_u64() + 1 < to.as_u64() => {
             let next = SchemaVersion(from.as_u64() + 1);
-            migrate_schema::<T>(db.clone(), datadir, from, next, log.clone())?;
-            migrate_schema::<T>(db, datadir, next, to, log)
+            migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?;
+            migrate_schema::<T>(db, datadir, next, to, log, spec)
         }
         //
@@ -89,6 +91,7 @@ pub fn migrate_schema<T: BeaconChainTypes>(
             migration_schema_v7::update_with_reinitialized_fork_choice::<T>(
                 &mut persisted_fork_choice_v7,
                 db.clone(),
+                spec,
             )
             .map_err(StoreError::SchemaMigrationError)?;
         }
diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs
index 4cede798eaa..9222266ba94 100644
--- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs
+++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs
@@ -3,8 +3,7 @@ use crate::beacon_chain::BeaconChainTypes;
 use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
 use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
 use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7};
-use crate::types::{Checkpoint, Epoch, Hash256};
-use crate::types::{EthSpec, Slot};
+use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot};
 use crate::{BeaconForkChoiceStore, BeaconSnapshot};
 use fork_choice::ForkChoice;
 use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice};
@@ -25,6 +24,7 @@ four_byte_option_impl!(four_byte_option_usize, usize);
 pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
     persisted_fork_choice: &mut PersistedForkChoiceV7,
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    spec: &ChainSpec,
 ) -> Result<(), String> {
     let anchor_block_root = persisted_fork_choice
         .fork_choice_store
@@ -39,7 +39,7 @@ pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
         .map_err(|e| format!("{:?}", e))?
         .ok_or_else(|| "Missing anchor beacon state".to_string())?;
     let snapshot = BeaconSnapshot {
-        beacon_block: anchor_block,
+        beacon_block: Arc::new(anchor_block),
         beacon_block_root: anchor_block_root,
         beacon_state: anchor_state,
     };
@@ -49,6 +49,10 @@ pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
         anchor_block_root,
         &snapshot.beacon_block,
         &snapshot.beacon_state,
+        // Don't provide the current slot here, just use what's in the store. We don't need to know
+        // the head here, plus it's nice to avoid mutating fork choice during this process.
+        None,
+        spec,
     )
     .map_err(|e| format!("{:?}", e))?;
     persisted_fork_choice.fork_choice = fork_choice.to_persisted();
diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs
index 5a287daf0f1..0bbd4419b9d 100644
--- a/beacon_node/beacon_chain/src/shuffling_cache.rs
+++ b/beacon_node/beacon_chain/src/shuffling_cache.rs
@@ -47,6 +47,12 @@ impl ShufflingCache {
     }
 }
 
+impl Default for ShufflingCache {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// Contains the shuffling IDs for a beacon block.
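///
/// These IDs allow the `ShufflingCache` above to be queried for a block's committee
/// shufflings without re-computing them from a `BeaconState`.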
pub struct BlockShufflingIds { pub current: AttestationShufflingId, diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index d5b41366ccd..40b73451cb0 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -1,6 +1,7 @@ use crate::BeaconSnapshot; use itertools::process_results; use std::cmp; +use std::sync::Arc; use std::time::Duration; use types::{ beacon_state::CloneConfig, BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, @@ -33,7 +34,7 @@ impl From> for PreProcessingSnapshot { Self { pre_state: snapshot.beacon_state, beacon_state_root, - beacon_block: snapshot.beacon_block.into(), + beacon_block: snapshot.beacon_block.clone_as_blinded(), beacon_block_root: snapshot.beacon_block_root, } } @@ -63,7 +64,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.into(), + beacon_block: self.beacon_block.clone_as_blinded(), beacon_block_root: self.beacon_block_root, pre_state: self.pre_state.unwrap_or(self.beacon_state), beacon_state_root, @@ -76,7 +77,7 @@ impl CacheItem { Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); PreProcessingSnapshot { - beacon_block: self.beacon_block.clone().into(), + beacon_block: self.beacon_block.clone_as_blinded(), beacon_block_root: self.beacon_block_root, pre_state: self .pre_state @@ -116,7 +117,7 @@ pub enum StateAdvance { /// The item stored in the `SnapshotCache`. pub struct CacheItem { - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, /// This state is equivalent to `self.beacon_block.state_root()`. beacon_state: BeaconState, @@ -185,7 +186,7 @@ impl SnapshotCache { ) { let parent_root = snapshot.beacon_block.message().parent_root(); let item = CacheItem { - beacon_block: snapshot.beacon_block, + beacon_block: snapshot.beacon_block.clone(), beacon_block_root: snapshot.beacon_block_root, beacon_state: snapshot.beacon_state, pre_state, @@ -384,7 +385,7 @@ mod test { fn get_snapshot(i: u64) -> BeaconSnapshot { let spec = MainnetEthSpec::default_spec(); - let beacon_state = get_harness().chain.head_beacon_state().unwrap(); + let beacon_state = get_harness().chain.head_beacon_state_cloned(); let signed_beacon_block = SignedBeaconBlock::from_block( BeaconBlock::empty(&spec), @@ -395,7 +396,7 @@ mod test { BeaconSnapshot { beacon_state, - beacon_block: signed_beacon_block, + beacon_block: Arc::new(signed_beacon_block), beacon_block_root: Hash256::from_low_u64_be(i), } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 030507a83a0..5abec988775 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -213,16 +213,14 @@ async fn state_advance_timer( let log = log.clone(); let beacon_chain = beacon_chain.clone(); let next_slot = current_slot + 1; - executor.spawn_blocking( - move || { + executor.spawn( + async move { // Don't run fork choice during sync. 
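                    // (`best_slot` now reads from the cached head and returns a plain
                    // `Slot` rather than a `Result`, hence the simpler check below.)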
- if beacon_chain.best_slot().map_or(true, |head_slot| { - head_slot + MAX_FORK_CHOICE_DISTANCE < current_slot - }) { + if beacon_chain.best_slot() + MAX_FORK_CHOICE_DISTANCE < current_slot { return; } - if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { + if let Err(e) = beacon_chain.recompute_head_at_slot(next_slot).await { warn!( log, "Error updating fork choice for next slot"; @@ -231,17 +229,24 @@ async fn state_advance_timer( ); } - // Signal block proposal for the next slot (if it happens to be waiting). - if let Some(tx) = &beacon_chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(next_slot) { - warn!( - log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => next_slot, - ); - } - } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + beacon_chain.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance_signal_tx", + ); }, "fork_choice_advance", ); @@ -264,7 +269,7 @@ fn advance_head( // // Fork-choice is not run *before* this function to avoid unnecessary calls whilst syncing. { - let head_slot = beacon_chain.head_info()?.slot; + let head_slot = beacon_chain.best_slot(); // Don't run this when syncing or if lagging too far behind. if head_slot + MAX_ADVANCE_DISTANCE < current_slot { @@ -275,7 +280,7 @@ fn advance_head( } } - let head_root = beacon_chain.head_info()?.block_root; + let head_root = beacon_chain.head_beacon_block_root(); let (head_slot, head_state_root, mut state) = match beacon_chain .snapshot_cache diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 980de25cf3c..62765c22224 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -515,13 +515,38 @@ where } pub fn get_current_state(&self) -> BeaconState { - self.chain.head().unwrap().beacon_state + self.chain.head_beacon_state_cloned() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { - let head = self.chain.head().unwrap(); + let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - (head.beacon_state, state_root) + ( + head.beacon_state.clone_with_only_committee_caches(), + state_root, + ) + } + + pub fn head_slot(&self) -> Slot { + self.chain.canonical_head.cached_head().head_slot() + } + + pub fn head_block_root(&self) -> Hash256 { + self.chain.canonical_head.cached_head().head_block_root() + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + } + + pub fn justified_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .justified_checkpoint() } pub fn get_current_slot(&self) -> Slot { @@ -565,7 +590,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } - pub fn make_block( + pub async fn make_block( &self, mut state: BeaconState, slot: Slot, @@ -599,6 +624,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -613,7 +639,7 @@ where /// Useful for the `per_block_processing` tests. 
Creates a block, and returns the state after /// caches are built but before the generated block is processed. - pub fn make_block_return_pre_state( + pub async fn make_block_return_pre_state( &self, mut state: BeaconState, slot: Slot, @@ -649,6 +675,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -1098,11 +1125,11 @@ where let mut attestation_2 = attestation_1.clone(); attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1156,11 +1183,11 @@ where attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1182,19 +1209,14 @@ where } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { - let mut block_header_1 = self - .chain - .head_beacon_block() - .unwrap() - .message() - .block_header(); + let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = Hash256::zero(); let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; let mut signed_block_headers = vec![block_header_1, block_header_2] @@ -1212,7 +1234,7 @@ where pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { @@ -1235,7 +1257,7 @@ where /// Create a new block, apply `block_modifier` to it, sign it and return it. /// /// The state returned is a pre-block state at the same slot as the produced block. 
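    ///
    /// A rough usage sketch (hypothetical test code; the method becomes `async` here,
    /// hence the `.await`):
    ///
    /// ```ignore
    /// let (block, pre_state) = harness
    ///     .make_block_with_modifier(state, slot, |block| {
    ///         *block.parent_root_mut() = Hash256::zero();
    ///     })
    ///     .await;
    /// ```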
- pub fn make_block_with_modifier( + pub async fn make_block_with_modifier( &self, state: BeaconState, slot: Slot, @@ -1244,7 +1266,7 @@ where assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); - let (block, state) = self.make_block_return_pre_state(state, slot); + let (block, state) = self.make_block_return_pre_state(state, slot).await; let (mut block, _) = block.deconstruct(); block_modifier(&mut block); @@ -1332,23 +1354,25 @@ where (deposits, state) } - pub fn process_block( + pub async fn process_block( &self, slot: Slot, block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice()?; + let block_hash: SignedBeaconBlockHash = + self.chain.process_block(Arc::new(block)).await?.into(); + self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } - pub fn process_block_result( + pub async fn process_block_result( &self, block: SignedBeaconBlock, ) -> Result> { - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice().unwrap(); + let block_hash: SignedBeaconBlockHash = + self.chain.process_block(Arc::new(block)).await?.into(); + self.chain.recompute_head_at_current_slot().await?; Ok(block_hash) } @@ -1403,14 +1427,14 @@ where self.chain.slot_clock.set_slot(slot.into()); } - pub fn add_block_at_slot( + pub async fn add_block_at_slot( &self, slot: Slot, state: BeaconState, ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { self.set_current_slot(slot); - let (block, new_state) = self.make_block(state, slot); - let block_hash = self.process_block(slot, block.clone())?; + let (block, new_state) = self.make_block(state, slot).await; + let block_hash = self.process_block(slot, block.clone()).await?; Ok((block_hash, block, new_state)) } @@ -1427,19 +1451,19 @@ where self.process_attestations(attestations); } - pub fn add_attested_block_at_slot( + pub async fn add_attested_block_at_slot( &self, slot: Slot, state: BeaconState, state_root: Hash256, validators: &[usize], ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { - let (block_hash, block, state) = self.add_block_at_slot(slot, state)?; + let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block, validators); Ok((block_hash, state)) } - pub fn add_attested_blocks_at_slots( + pub async fn add_attested_blocks_at_slots( &self, state: BeaconState, state_root: Hash256, @@ -1448,9 +1472,10 @@ where ) -> AddBlocksResult { assert!(!slots.is_empty()); self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None) + .await } - fn add_attested_blocks_at_slots_given_lbh( + async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, state_root: Hash256, @@ -1467,6 +1492,7 @@ where for slot in slots { let (block_hash, new_state) = self .add_attested_block_at_slot(*slot, state, state_root, validators) + .await .unwrap(); state = new_state; block_hash_from_slot.insert(*slot, block_hash); @@ -1488,7 +1514,7 @@ where /// epoch at a time. /// /// Chains is a vec of `(state, slots, validators)` tuples. 
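    ///
    /// A hedged sketch of typical usage, building two competing chains attested by
    /// disjoint validator sets (all names here are illustrative):
    ///
    /// ```ignore
    /// let results = harness
    ///     .add_blocks_on_multiple_chains(vec![
    ///         (state_a, slots_a, honest_validators.to_vec()),
    ///         (state_b, slots_b, faulty_validators.to_vec()),
    ///     ])
    ///     .await;
    /// ```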
- pub fn add_blocks_on_multiple_chains( + pub async fn add_blocks_on_multiple_chains( &self, chains: Vec<(BeaconState, Vec, Vec)>, ) -> Vec> { @@ -1547,7 +1573,8 @@ where &epoch_slots, &validators, Some(head_block), - ); + ) + .await; block_hashes.extend(new_block_hashes); state_hashes.extend(new_state_hashes); @@ -1596,18 +1623,18 @@ where /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. - pub fn build_block( + pub async fn build_block( &self, state: BeaconState, slot: Slot, _block_strategy: BlockStrategy, ) -> (SignedBeaconBlock, BeaconState) { - self.make_block(state, slot) + self.make_block(state, slot).await } /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. - pub fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1618,7 +1645,7 @@ where .checked_add(1) .unwrap(); - self.extend_slots(num_slots) + self.extend_slots(num_slots).await } /// Uses `Self::extend_chain` to `num_slots` blocks. @@ -1627,8 +1654,8 @@ where /// /// - BlockStrategy::OnCanonicalHead, /// - AttestationStrategy::AllValidators, - pub fn extend_slots(&self, num_slots: usize) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_slots(&self, num_slots: usize) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1637,6 +1664,7 @@ where BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) + .await } /// Deprecated: Use add_attested_blocks_at_slots() instead @@ -1650,7 +1678,7 @@ where /// /// The `attestation_strategy` dictates which validators will attest to the newly created /// blocks. - pub fn extend_chain( + pub async fn extend_chain( &self, num_blocks: usize, block_strategy: BlockStrategy, @@ -1685,8 +1713,9 @@ where AttestationStrategy::SomeValidators(vals) => vals, }; let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, last_produced_block_hash, _) = - self.add_attested_blocks_at_slots(state, state_root, &slots, &validators); + let (_, _, last_produced_block_hash, _) = self + .add_attested_blocks_at_slots(state, state_root, &slots, &validators) + .await; last_produced_block_hash.into() } @@ -1700,41 +1729,40 @@ where /// then built `faulty_fork_blocks`. /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. - pub fn generate_two_forks_by_skipping_a_block( + pub async fn generate_two_forks_by_skipping_a_block( &self, honest_validators: &[usize], faulty_validators: &[usize], honest_fork_blocks: usize, faulty_fork_blocks: usize, ) -> (Hash256, Hash256) { - let initial_head_slot = self - .chain - .head() - .expect("should get head") - .beacon_block - .slot(); + let initial_head_slot = self.chain.head_snapshot().beacon_block.slot(); // Move to the next slot so we may produce some more blocks on the head. self.advance_slot(); // Extend the chain with blocks where only honest validators agree. 
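        // Roughly, the shape built below (the faulty fork skips exactly one slot):
        //
        //     ... -- initial_head -- h1 -- h2 -- ...    <- honest validators attest
        //              \
        //               (skipped) -- f1 -- ...          <- faulty validators attest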
- let honest_head = self.extend_chain( - honest_fork_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(honest_validators.to_vec()), - ); + let honest_head = self + .extend_chain( + honest_fork_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(honest_validators.to_vec()), + ) + .await; // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes // agree. - let faulty_head = self.extend_chain( - faulty_fork_blocks, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: initial_head_slot, - // `initial_head_slot + 2` means one slot is skipped. - first_slot: initial_head_slot + 2, - }, - AttestationStrategy::SomeValidators(faulty_validators.to_vec()), - ); + let faulty_head = self + .extend_chain( + faulty_fork_blocks, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: initial_head_slot, + // `initial_head_slot + 2` means one slot is skipped. + first_slot: initial_head_slot + 2, + }, + AttestationStrategy::SomeValidators(faulty_validators.to_vec()), + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index b1d1f71d6cd..85e4f1f093a 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -3,6 +3,7 @@ use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; +use std::sync::Arc; use tree_hash::TreeHash; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; @@ -17,8 +18,8 @@ lazy_static! { /// attestation at each slot from genesis through to three epochs past the head. /// /// It checks the produced attestation against some locally computed values. -#[test] -fn produces_attestations() { +#[tokio::test] +async fn produces_attestations() { let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4; let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3; @@ -37,11 +38,13 @@ fn produces_attestations() { if slot > 0 && slot <= num_blocks_produced { harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } let slot = Slot::from(slot); @@ -129,10 +132,20 @@ fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let early_attestation = { - let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + let proto_block = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root) + .unwrap(); chain .early_attester_cache - .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .add_head_block( + block_root, + Arc::new(block.clone()), + proto_block, + &state, + &chain.spec, + ) .unwrap(); chain .early_attester_cache @@ -151,8 +164,8 @@ fn produces_attestations() { /// Ensures that the early attester cache wont create an attestation to a block in a later slot than /// the one requested. 
-#[test] -fn early_attester_cache_old_request() { +#[tokio::test] +async fn early_attester_cache_old_request() { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[..].to_vec()) @@ -162,18 +175,20 @@ fn early_attester_cache_old_request() { harness.advance_slot(); - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), 2); let head_proto_block = harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head.beacon_block_root) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 2fe8818a9aa..6a9e6047938 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -56,7 +56,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness( chain: &BeaconChain, ) -> (Attestation, usize, usize, SecretKey, SubnetId) { - let head = chain.head().expect("should get head"); + let head = chain.head_snapshot(); let current_slot = chain.slot().expect("should get slot"); let mut valid_attestation = chain @@ -106,7 +106,8 @@ fn get_valid_aggregated_attestation( chain: &BeaconChain, aggregate: Attestation, ) -> (SignedAggregateAndProof, usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -155,7 +156,8 @@ fn get_non_aggregator( chain: &BeaconChain, aggregate: &Attestation, ) -> (usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -213,15 +215,17 @@ struct GossipTester { } impl GossipTester { - pub fn new() -> Self { + pub async fn new() -> Self { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -395,9 +399,10 @@ impl GossipTester { } } /// Tests verification of `SignedAggregateAndProof` from the gossip network. 
-#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { GossipTester::new() + .await /* * The following two tests ensure: * @@ -511,8 +516,7 @@ fn aggregated_gossip_verification() { let committee_len = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_beacon_committee(tester.slot(), a.message.aggregate.data.index) .expect("should get committees") @@ -612,7 +616,7 @@ fn aggregated_gossip_verification() { tester.valid_aggregate.message.aggregate.clone(), None, &sk, - &chain.head_info().unwrap().fork, + &chain.canonical_head.cached_head().head_fork(), chain.genesis_validators_root, &chain.spec, ) @@ -669,9 +673,10 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for an unaggregated attestation on the gossip network. -#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { GossipTester::new() + .await /* * The following test ensures: * @@ -684,8 +689,7 @@ fn unaggregated_gossip_verification() { a.data.index = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_committee_count_at_slot(a.data.slot) .unwrap() @@ -924,16 +928,18 @@ fn unaggregated_gossip_verification() { /// Ensures that an attestation that skips epochs can still be processed. /// /// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache. -#[test] -fn attestation_that_skips_epochs() { +#[tokio::test] +async fn attestation_that_skips_epochs() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); let current_epoch = harness.chain.epoch().expect("should get epoch"); @@ -992,16 +998,18 @@ fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } -#[test] -fn attestation_to_finalized_block() { +#[tokio::test] +async fn attestation_to_finalized_block() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let finalized_checkpoint = harness .chain @@ -1067,16 +1075,18 @@ fn attestation_to_finalized_block() { .contains(earlier_block_root)); } -#[test] -fn verify_aggregate_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_aggregate_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. 
- harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -1124,16 +1134,18 @@ fn verify_aggregate_for_gossip_doppelganger_detection() { .expect("should check if gossip aggregator was observed")); } -#[test] -fn verify_attestation_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_attestation_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index ca65b05fd8b..4b3e1e72fe1 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -27,19 +27,18 @@ const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGT lazy_static! { /// A cached set of keys. static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); - - /// A cached set of valid blocks - static ref CHAIN_SEGMENT: Vec> = get_chain_segment(); } -fn get_chain_segment() -> Vec> { +async fn get_chain_segment() -> Vec> { let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - CHAIN_SEGMENT_LENGTH, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness .chain @@ -50,11 +49,14 @@ fn get_chain_segment() -> Vec> { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, snapshot.beacon_block) + .make_full_block( + &snapshot.beacon_block_root, + snapshot.beacon_block.as_ref().clone(), + ) .unwrap(); BeaconSnapshot { beacon_block_root: snapshot.beacon_block_root, - beacon_block: full_block, + beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, } }) @@ -75,8 +77,8 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Vec> { - CHAIN_SEGMENT +fn chain_segment_blocks(chain_segment: &[BeaconSnapshot]) -> Vec>> { + chain_segment .iter() .map(|snapshot| snapshot.beacon_block.clone()) .collect() @@ -110,13 +112,13 @@ fn update_proposal_signatures( .get(proposer_index) .expect("proposer keypair should be available"); - let (block, _) = snapshot.beacon_block.clone().deconstruct(); - snapshot.beacon_block = block.sign( + let (block, _) = snapshot.beacon_block.as_ref().clone().deconstruct(); + snapshot.beacon_block = Arc::new(block.sign( &keypair.sk, &state.fork(), state.genesis_validators_root(), spec, - ); + )); } } @@ -124,17 +126,18 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { for i in 0..snapshots.len() { let root = 
snapshots[i].beacon_block.canonical_root(); if let Some(child) = snapshots.get_mut(i + 1) { - let (mut block, signature) = child.beacon_block.clone().deconstruct(); + let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; - child.beacon_block = SignedBeaconBlock::from_block(block, signature) + child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)) } } } -#[test] -fn chain_segment_full_segment() { +#[tokio::test] +async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -145,33 +148,36 @@ fn chain_segment_full_segment() { harness .chain .process_chain_segment(vec![]) + .await .into_block_error() .expect("should import empty chain segment"); harness .chain .process_chain_segment(blocks.clone()) + .await .into_block_error() .expect("should import chain segment"); - harness.chain.fork_choice().expect("should run fork choice"); + harness + .chain + .recompute_head_at_current_slot() + .await + .expect("should run fork choice"); assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } -#[test] -fn chain_segment_varying_chunk_size() { +#[tokio::test] +async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -182,36 +188,39 @@ fn chain_segment_varying_chunk_size() { harness .chain .process_chain_segment(chunk.to_vec()) + .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } - harness.chain.fork_choice().expect("should run fork choice"); + harness + .chain + .recompute_head_at_current_slot() + .await + .expect("should run fork choice"); assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } } -#[test] -fn chain_segment_non_linear_parent_roots() { +#[tokio::test] +async fn chain_segment_non_linear_parent_roots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; + harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test with a block removed. */ - let mut blocks = chain_segment_blocks(); + let mut blocks = chain_segment_blocks(&chain_segment); blocks.remove(2); assert!( @@ -219,6 +228,7 @@ fn chain_segment_non_linear_parent_roots() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -228,16 +238,17 @@ fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. 
*/ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -245,28 +256,30 @@ fn chain_segment_non_linear_parent_roots() { ); } -#[test] -fn chain_segment_non_linear_slots() { +#[tokio::test] +async fn chain_segment_non_linear_slots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test where a child is lower than the parent. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -277,16 +290,17 @@ fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -294,7 +308,8 @@ fn chain_segment_non_linear_slots() { ); } -fn assert_invalid_signature( +async fn assert_invalid_signature( + chain_segment: &[BeaconSnapshot], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], @@ -311,6 +326,7 @@ fn assert_invalid_signature( harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -319,19 +335,20 @@ fn assert_invalid_signature( ); // Ensure the block will be rejected if imported on its own (without gossip checking). - let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. - let _ = harness.chain.process_chain_segment(ancestor_blocks); + let _ = harness.chain.process_chain_segment(ancestor_blocks).await; assert!( matches!( harness .chain - .process_block(snapshots[block_index].beacon_block.clone()), + .process_block(snapshots[block_index].beacon_block.clone()) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid {} signature", @@ -346,25 +363,34 @@ fn assert_invalid_signature( // slot) tuple. 
} -fn get_invalid_sigs_harness() -> BeaconChainHarness> { +async fn get_invalid_sigs_harness( + chain_segment: &[BeaconSnapshot], +) -> BeaconChainHarness> { let harness = get_harness(VALIDATOR_COUNT); harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); harness } -#[test] -fn invalid_signature_gossip_block() { +#[tokio::test] +async fn invalid_signature_gossip_block() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Ensure the block will be rejected if imported on its own (without gossip checking). - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); // Import all the ancestors before the `block_index` block. - let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) @@ -372,13 +398,18 @@ fn invalid_signature_gossip_block() { harness .chain .process_chain_segment(ancestor_blocks) + .await .into_block_error() .expect("should import all blocks prior to the one being tested"); assert!( matches!( harness .chain - .process_block(SignedBeaconBlock::from_block(block, junk_signature())), + .process_block(Arc::new(SignedBeaconBlock::from_block( + block, + junk_signature() + ))) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid gossip signature", @@ -386,14 +417,21 @@ fn invalid_signature_gossip_block() { } } -#[test] -fn invalid_signature_block_proposal() { +#[tokio::test] +async fn invalid_signature_block_proposal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); let blocks = snapshots .iter() .map(|snapshot| snapshot.beacon_block.clone()) @@ -404,6 +442,7 @@ fn invalid_signature_block_proposal() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -412,26 +451,37 @@ fn invalid_signature_block_proposal() { } } -#[test] -fn invalid_signature_randao_reveal() { +#[tokio::test] +async fn invalid_signature_randao_reveal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = 
snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); *block.body_mut().randao_reveal_mut() = junk_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "randao"); + assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await; } } -#[test] -fn invalid_signature_proposer_slashing() { +#[tokio::test] +async fn invalid_signature_proposer_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); let proposer_slashing = ProposerSlashing { signed_header_1: SignedBeaconBlockHeader { message: block.block_header(), @@ -447,18 +497,27 @@ fn invalid_signature_proposer_slashing() { .proposer_slashings_mut() .push(proposer_slashing) .expect("should update proposer slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "proposer slashing", + ) + .await; } } -#[test] -fn invalid_signature_attester_slashing() { +#[tokio::test] +async fn invalid_signature_attester_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let indexed_attestation = IndexedAttestation { attesting_indices: vec![0].into(), data: AttestationData { @@ -480,33 +539,58 @@ fn invalid_signature_attester_slashing() { attestation_1: indexed_attestation.clone(), attestation_2: indexed_attestation, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .attester_slashings_mut() .push(attester_slashing) .expect("should update attester slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing"); + assert_invalid_signature( + &chain_segment, + 
&harness, + block_index, + &snapshots, + "attester slashing", + ) + .await; } } -#[test] -fn invalid_signature_attestation() { +#[tokio::test] +async fn invalid_signature_attestation() { + let chain_segment = get_chain_segment().await; let mut checked_attestation = false; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) { attestation.signature = junk_aggregate_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attestation"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "attestation", + ) + .await; checked_attestation = true; } } @@ -517,12 +601,13 @@ fn invalid_signature_attestation() { ) } -#[test] -fn invalid_signature_deposit() { +#[tokio::test] +async fn invalid_signature_deposit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Note: an invalid deposit signature is permitted! - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let deposit = Deposit { proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), data: DepositData { @@ -532,13 +617,18 @@ fn invalid_signature_deposit() { signature: junk_signature().into(), }, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .deposits_mut() .push(deposit) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); let blocks = snapshots @@ -550,6 +640,7 @@ fn invalid_signature_deposit() { harness .chain .process_chain_segment(blocks) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -558,13 +649,18 @@ fn invalid_signature_deposit() { } } -#[test] -fn invalid_signature_exit() { +#[tokio::test] +async fn invalid_signature_exit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let epoch = snapshots[block_index].beacon_state.current_epoch(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .voluntary_exits_mut() @@ -576,10 
+672,18 @@ fn invalid_signature_exit() { signature: junk_signature(), }) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "voluntary exit", + ) + .await; } } @@ -590,27 +694,30 @@ fn unwrap_err(result: Result) -> E { } } -#[test] -fn block_gossip_verification() { +#[tokio::test] +async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT[block_index].beacon_block.slot().as_u64()); + .set_slot(chain_segment[block_index].beacon_block.slot().as_u64()); // Import the ancestors prior to the block we're testing. - for snapshot in &CHAIN_SEGMENT[0..block_index] { + for snapshot in &chain_segment[0..block_index] { let gossip_verified = harness .chain .verify_block_for_gossip(snapshot.beacon_block.clone()) + .await .expect("should obtain gossip verified block"); harness .chain .process_block(gossip_verified) + .await .expect("should import valid gossip verified block"); } @@ -624,15 +731,16 @@ fn block_gossip_verification() { * future blocks for processing at the appropriate slot). */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_block_slot = block.slot() + 1; *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -654,21 +762,19 @@ fn block_gossip_verification() { * nodes, etc). */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_finalized_slot = harness - .chain - .head_info() - .expect("should get head info") - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -687,8 +793,9 @@ fn block_gossip_verification() { * proposer_index pubkey. */ - let block = CHAIN_SEGMENT[block_index] + let block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -697,10 +804,11 @@ fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip(SignedBeaconBlock::from_block( + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( block, junk_signature() - )) + ))) + .await ), BlockError::ProposalSignatureInvalid ), @@ -715,15 +823,16 @@ fn block_gossip_verification() { * The block's parent (defined by block.parent_root) passes validation. 
*/ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let parent_root = Hash256::from_low_u64_be(42); *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -740,15 +849,16 @@ fn block_gossip_verification() { * store.finalized_checkpoint.root */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); - let parent_root = CHAIN_SEGMENT[0].beacon_block_root; + let parent_root = chain_segment[0].beacon_block_root; *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -766,8 +876,9 @@ fn block_gossip_verification() { * processing while proposers for the block's branch are calculated. */ - let mut block = CHAIN_SEGMENT[block_index] + let mut block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -779,13 +890,13 @@ fn block_gossip_verification() { *block.proposer_index_mut() = other_proposer; let block = block.sign( &generate_deterministic_keypair(other_proposer as usize).sk, - &harness.chain.head_info().unwrap().fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -797,7 +908,7 @@ fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::RepeatProposal { proposer, slot, @@ -807,9 +918,9 @@ fn block_gossip_verification() { "should register any valid signature against the proposer, even if the block failed later verification" ); - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( - harness.chain.verify_block_for_gossip(block).is_ok(), + harness.chain.verify_block_for_gossip(block).await.is_ok(), "the valid block should be processed" ); @@ -822,12 +933,13 @@ fn block_gossip_verification() { * signed_beacon_block.message.slot. 
*/ - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( matches!( harness .chain .verify_block_for_gossip(block.clone()) + .await .err() .expect("should error when processing known block"), BlockError::RepeatProposal { @@ -840,8 +952,8 @@ fn block_gossip_verification() { ); } -#[test] -fn verify_block_for_gossip_slashing_detection() { +#[tokio::test] +async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); let slasher = Arc::new( Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(), @@ -858,12 +970,21 @@ fn verify_block_for_gossip_slashing_detection() { harness.advance_slot(); let state = harness.get_current_state(); - let (block1, _) = harness.make_block(state.clone(), Slot::new(1)); - let (block2, _) = harness.make_block(state, Slot::new(1)); + let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await; + let (block2, _) = harness.make_block(state, Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block1).unwrap(); - harness.chain.process_block(verified_block).unwrap(); - unwrap_err(harness.chain.verify_block_for_gossip(block2)); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block1)) + .await + .unwrap(); + harness.chain.process_block(verified_block).await.unwrap(); + unwrap_err( + harness + .chain + .verify_block_for_gossip(Arc::new(block2)) + .await, + ); // Slasher should have been handed the two conflicting blocks and crafted a slashing. slasher.process_queued(Epoch::new(0)).unwrap(); @@ -875,16 +996,20 @@ fn verify_block_for_gossip_slashing_detection() { slasher_dir.close().unwrap(); } -#[test] -fn verify_block_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_block_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - let (block, _) = harness.make_block(state.clone(), Slot::new(1)); + let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block).unwrap(); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block)) + .await + .unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); - harness.chain.process_block(verified_block).unwrap(); + harness.chain.process_block(verified_block).await.unwrap(); for att in attestations.iter() { let epoch = att.data.target.epoch; @@ -921,8 +1046,8 @@ fn verify_block_for_gossip_doppelganger_detection() { } } -#[test] -fn add_base_block_to_altair_chain() { +#[tokio::test] +async fn add_base_block_to_altair_chain() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); @@ -940,11 +1065,13 @@ fn add_base_block_to_altair_chain() { harness.advance_slot(); // Build out all the blocks in epoch 0. - harness.extend_chain( - slots_per_epoch as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + slots_per_epoch as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -952,7 +1079,7 @@ fn add_base_block_to_altair_chain() { // Produce an Altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (altair_signed_block, _) = harness.make_block(state.clone(), slot); + let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await; let altair_block = &altair_signed_block .as_altair() .expect("test expects an altair block") @@ -1007,7 +1134,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(base_block.clone()) + .verify_block_for_gossip(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1020,7 +1148,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(base_block.clone()) + .process_block(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1031,7 +1160,10 @@ fn add_base_block_to_altair_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. assert!(matches!( - harness.chain.process_chain_segment(vec![base_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(base_block)]) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { @@ -1042,8 +1174,8 @@ fn add_base_block_to_altair_chain() { )); } -#[test] -fn add_altair_block_to_base_chain() { +#[tokio::test] +async fn add_altair_block_to_base_chain() { let mut spec = MainnetEthSpec::default_spec(); // Altair never happens. @@ -1060,11 +1192,13 @@ fn add_altair_block_to_base_chain() { harness.advance_slot(); // Build one block. - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -1072,7 +1206,7 @@ fn add_altair_block_to_base_chain() { // Produce an altair block. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (base_signed_block, _) = harness.make_block(state.clone(), slot); + let (base_signed_block, _) = harness.make_block(state.clone(), slot).await; let base_block = &base_signed_block .as_base() .expect("test expects a base block") @@ -1128,7 +1262,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(altair_block.clone()) + .verify_block_for_gossip(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1141,7 +1276,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(altair_block.clone()) + .process_block(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1152,7 +1288,10 @@ fn add_altair_block_to_base_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. 
assert!(matches!( - harness.chain.process_chain_segment(vec![altair_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(altair_block)]) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index d67ed35f9cc..91d5eb21cae 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -27,11 +27,11 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { } } -#[test] +#[tokio::test] // TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1` // are causing failed lookups to the execution node. I need to come back to this. #[should_panic] -fn merge_with_terminal_block_hash_override() { +async fn merge_with_terminal_block_hash_override() { let altair_fork_epoch = Epoch::new(0); let bellatrix_fork_epoch = Epoch::new(0); @@ -70,8 +70,7 @@ fn merge_with_terminal_block_hash_override() { assert!( harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_block .as_merge() .is_ok(), @@ -80,9 +79,9 @@ fn merge_with_terminal_block_hash_override() { let mut execution_payloads = vec![]; for i in 0..E::slots_per_epoch() * 3 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; let execution_payload = block.message().body().execution_payload().unwrap().clone(); if i == 0 { @@ -94,8 +93,8 @@ fn merge_with_terminal_block_hash_override() { verify_execution_payload_chain(execution_payloads.as_slice()); } -#[test] -fn base_altair_merge_with_terminal_block_after_fork() { +#[tokio::test] +async fn base_altair_merge_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); let bellatrix_fork_epoch = Epoch::new(8); @@ -118,15 +117,15 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Start with the base fork. */ - assert!(harness.chain.head().unwrap().beacon_block.as_base().is_ok()); + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot); + harness.extend_to_slot(altair_fork_slot).await; - let altair_head = harness.chain.head().unwrap().beacon_block; + let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); assert_eq!(altair_head.slot(), altair_fork_slot); @@ -134,9 +133,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Do the merge fork, without a terminal PoW block. */ - harness.extend_to_slot(merge_fork_slot); + harness.extend_to_slot(merge_fork_slot).await; - let merge_head = harness.chain.head().unwrap().beacon_block; + let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); assert_eq!( @@ -148,9 +147,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Next merge block shouldn't include an exec payload. 
*/ - harness.extend_slots(1); + harness.extend_slots(1).await; - let one_after_merge_head = harness.chain.head().unwrap().beacon_block; + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; assert_eq!( *one_after_merge_head .message() @@ -175,9 +174,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { */ for _ in 0..4 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); } diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c9df6aa31db..535fe080a7f 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -46,18 +46,20 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { harness } -#[test] -fn voluntary_exit() { +#[tokio::test] +async fn voluntary_exit() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), VALIDATOR_COUNT); let spec = &harness.chain.spec.clone(); - harness.extend_chain( - (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let validator_index1 = VALIDATOR_COUNT - 1; let validator_index2 = VALIDATOR_COUNT - 2; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 2a48a4b6911..e37ed286bcf 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -2,8 +2,8 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, HeadInfo, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, @@ -12,6 +12,7 @@ use execution_layer::{ use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; use tree_hash::TreeHash; @@ -84,19 +85,19 @@ impl InvalidPayloadRig { fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { self.harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block_root) .unwrap() .execution_status } - fn fork_choice(&self) { - self.harness.chain.fork_choice().unwrap(); - } - - fn head_info(&self) -> HeadInfo { - self.harness.chain.head_info().unwrap() + async fn recompute_head(&self) { + self.harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); } fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { @@ -142,22 +143,24 @@ impl InvalidPayloadRig { .block_hash } - fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { - (0..num_blocks) - .map(|_| 
self.import_block(is_valid.clone()))
- .collect()
+ async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec<Hash256> {
+ let mut roots = Vec::with_capacity(num_blocks as usize);
+ for _ in 0..num_blocks {
+ roots.push(self.import_block(is_valid.clone()).await);
+ }
+ roots
 }
- fn move_to_first_justification(&mut self, is_valid: Payload) {
+ async fn move_to_first_justification(&mut self, is_valid: Payload) {
 let slots_till_justification = E::slots_per_epoch() * 3;
- self.build_blocks(slots_till_justification, is_valid);
+ self.build_blocks(slots_till_justification, is_valid).await;
- let justified_checkpoint = self.head_info().current_justified_checkpoint;
+ let justified_checkpoint = self.harness.justified_checkpoint();
 assert_eq!(justified_checkpoint.epoch, 2);
 }
 /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`.
- fn import_block(&mut self, is_valid: Payload) -> Hash256 {
+ async fn import_block(&mut self, is_valid: Payload) -> Hash256 {
 self.import_block_parametric(is_valid, is_valid, |error| {
 matches!(
 error,
@@ -166,6 +169,7 @@ impl InvalidPayloadRig {
 )
 )
 })
+ .await
 }
 fn block_root_at_slot(&self, slot: Slot) -> Option<Hash256> {
@@ -178,13 +182,13 @@ impl InvalidPayloadRig {
 fn validate_manually(&self, block_root: Hash256) {
 self.harness
 .chain
- .fork_choice
- .write()
+ .canonical_head
+ .fork_choice_write_lock()
 .on_valid_execution_payload(block_root)
 .unwrap();
 }
- fn import_block_parametric<F: Fn(&BlockError<E>) -> bool>(
+ async fn import_block_parametric<F: Fn(&BlockError<E>) -> bool>(
 &mut self,
 new_payload_response: Payload,
 forkchoice_response: Payload,
@@ -192,10 +196,10 @@ impl InvalidPayloadRig {
 ) -> Hash256 {
 let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap();
- let head = self.harness.chain.head().unwrap();
- let state = head.beacon_state;
+ let head = self.harness.chain.head_snapshot();
+ let state = head.beacon_state.clone_with_only_committee_caches();
 let slot = state.slot() + 1;
- let (block, post_state) = self.harness.make_block(state, slot);
+ let (block, post_state) = self.harness.make_block(state, slot).await;
 let block_root = block.canonical_root();
 let set_new_payload = |payload: Payload| match payload {
@@ -249,7 +253,11 @@ impl InvalidPayloadRig {
 } else {
 mock_execution_layer.server.full_payload_verification();
 }
- let root = self.harness.process_block(slot, block.clone()).unwrap();
+ let root = self
+ .harness
+ .process_block(slot, block.clone())
+ .await
+ .unwrap();
 if self.enable_attestations {
 let all_validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
@@ -294,7 +302,7 @@ impl InvalidPayloadRig {
 set_new_payload(new_payload_response);
 set_forkchoice_updated(forkchoice_response);
- match self.harness.process_block(slot, block) {
+ match self.harness.process_block(slot, block).await {
 Err(error) if evaluate_error(&error) => (),
 Err(other) => {
 panic!("evaluate_error returned false with {:?}", other)
@@ -309,8 +317,12 @@ impl InvalidPayloadRig {
 }
 };
- let block_in_forkchoice =
- self.harness.chain.fork_choice.read().get_block(&block_root);
+ let block_in_forkchoice = self
+ .harness
+ .chain
+ .canonical_head
+ .fork_choice_read_lock()
+ .get_block(&block_root);
 if let Payload::Invalid { .. } = new_payload_response {
 // A block found to be immediately invalid should not end up in fork choice.
assert_eq!(block_in_forkchoice, None);
@@ -333,106 +345,111 @@ impl InvalidPayloadRig {
 block_root
 }
- fn invalidate_manually(&self, block_root: Hash256) {
+ async fn invalidate_manually(&self, block_root: Hash256) {
 self.harness
 .chain
 .process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { block_root })
+ .await
 .unwrap();
 }
 }
 /// Simple test of the different import types.
-#[test]
-fn valid_invalid_syncing() {
+#[tokio::test]
+async fn valid_invalid_syncing() {
 let mut rig = InvalidPayloadRig::new();
 rig.move_to_terminal_block();
- rig.import_block(Payload::Valid);
+ rig.import_block(Payload::Valid).await;
 rig.import_block(Payload::Invalid {
 latest_valid_hash: None,
- });
- rig.import_block(Payload::Syncing);
+ })
+ .await;
+ rig.import_block(Payload::Syncing).await;
 }
 /// Ensure that an invalid payload can invalidate its parent too (given the right
 /// `latest_valid_hash`).
-#[test]
-fn invalid_payload_invalidates_parent() {
+#[tokio::test]
+async fn invalid_payload_invalidates_parent() {
 let mut rig = InvalidPayloadRig::new().enable_attestations();
 rig.move_to_terminal_block();
- rig.import_block(Payload::Valid); // Import a valid transition block.
- rig.move_to_first_justification(Payload::Syncing);
+ rig.import_block(Payload::Valid).await; // Import a valid transition block.
+ rig.move_to_first_justification(Payload::Syncing).await;
 let roots = vec![
- rig.import_block(Payload::Syncing),
- rig.import_block(Payload::Syncing),
- rig.import_block(Payload::Syncing),
+ rig.import_block(Payload::Syncing).await,
+ rig.import_block(Payload::Syncing).await,
+ rig.import_block(Payload::Syncing).await,
 ];
 let latest_valid_hash = rig.block_hash(roots[0]);
 rig.import_block(Payload::Invalid {
 latest_valid_hash: Some(latest_valid_hash),
- });
+ })
+ .await;
 assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix());
 assert!(rig.execution_status(roots[1]).is_invalid());
 assert!(rig.execution_status(roots[2]).is_invalid());
- assert_eq!(rig.head_info().block_root, roots[0]);
+ assert_eq!(rig.harness.head_block_root(), roots[0]);
 }
 /// Test invalidation of a payload via the fork choice updated message.
 ///
 /// The `invalid_payload` argument determines the type of invalid payload: `Invalid`,
 /// `InvalidBlockHash`, etc., taking the `latest_valid_hash` as an argument.
-fn immediate_forkchoice_update_invalid_test(
+async fn immediate_forkchoice_update_invalid_test(
 invalid_payload: impl FnOnce(Option<ExecutionBlockHash>) -> Payload,
 ) {
 let mut rig = InvalidPayloadRig::new().enable_attestations();
 rig.move_to_terminal_block();
- rig.import_block(Payload::Valid); // Import a valid transition block.
- rig.move_to_first_justification(Payload::Syncing);
+ rig.import_block(Payload::Valid).await; // Import a valid transition block.
+ rig.move_to_first_justification(Payload::Syncing).await;
- let valid_head_root = rig.import_block(Payload::Valid);
+ let valid_head_root = rig.import_block(Payload::Valid).await;
 let latest_valid_hash = Some(rig.block_hash(valid_head_root));
 // Import a block which returns syncing when supplied via newPayload, and then
 // invalid when the forkchoice update is sent.
 rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| {
 false
- });
+ })
+ .await;
 // The head should be the latest valid block.
- assert_eq!(rig.head_info().block_root, valid_head_root); + assert_eq!(rig.harness.head_block_root(), valid_head_root); } -#[test] -fn immediate_forkchoice_update_payload_invalid() { +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid() { immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { latest_valid_hash, }) + .await } -#[test] -fn immediate_forkchoice_update_payload_invalid_block_hash() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_block_hash() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await } -#[test] -fn immediate_forkchoice_update_payload_invalid_terminal_block() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_terminal_block() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock).await } /// Ensure the client tries to exit when the justified checkpoint is invalidated. -#[test] -fn justified_checkpoint_becomes_invalid() { +#[tokio::test] +async fn justified_checkpoint_becomes_invalid() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; - let justified_checkpoint = rig.head_info().current_justified_checkpoint; + let justified_checkpoint = rig.harness.justified_checkpoint(); let parent_root_of_justified = rig .harness .chain @@ -456,7 +473,8 @@ fn justified_checkpoint_becomes_invalid() { // is invalid. BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) ) - }); + }) + .await; // The beacon chain should have triggered a shutdown. assert_eq!( @@ -468,18 +486,18 @@ fn justified_checkpoint_becomes_invalid() { } /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. -#[test] -fn pre_finalized_latest_valid_hash() { +#[tokio::test] +async fn pre_finalized_latest_valid_hash() { let num_blocks = E::slots_per_epoch() * 4; let finalized_epoch = 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root); @@ -490,10 +508,11 @@ fn pre_finalized_latest_valid_hash() { // Import a pre-finalized block. rig.import_block(Payload::Invalid { latest_valid_hash: Some(pre_finalized_block_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. 
assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -514,16 +533,16 @@ fn pre_finalized_latest_valid_hash() { /// /// - Invalidate descendants of `latest_valid_root`. /// - Validate `latest_valid_root` and its ancestors. -#[test] -fn latest_valid_hash_will_validate() { +#[tokio::test] +async fn latest_valid_hash_will_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(4, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(4, Payload::Syncing).await); let latest_valid_root = rig .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) @@ -532,9 +551,10 @@ fn latest_valid_hash_will_validate() { rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; - assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); + assert_eq!(rig.harness.head_slot(), LATEST_VALID_SLOT); for slot in 0..=5 { let slot = Slot::new(slot); @@ -558,18 +578,18 @@ fn latest_valid_hash_will_validate() { } /// Check behaviour when the `latest_valid_hash` is a junk value. -#[test] -fn latest_valid_hash_is_junk() { +#[tokio::test] +async fn latest_valid_hash_is_junk() { let num_blocks = E::slots_per_epoch() * 5; let finalized_epoch = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); @@ -577,10 +597,11 @@ fn latest_valid_hash_is_junk() { let junk_hash = ExecutionBlockHash::repeat_byte(42); rig.import_block(Payload::Invalid { latest_valid_hash: Some(junk_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -598,19 +619,19 @@ fn latest_valid_hash_is_junk() { } /// Check that descendants of invalid blocks are also invalidated. -#[test] -fn invalidates_all_descendants() { +#[tokio::test] +async fn invalidates_all_descendants() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
+ let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -621,9 +642,14 @@ fn invalidates_all_descendants() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block)) + .await + .unwrap(); + rig.recompute_head().await; // The latest valid hash will be set to the grandparent of the fork block. This means that the // parent of the fork block will become invalid. @@ -638,14 +664,15 @@ fn invalidates_all_descendants() { let latest_valid_hash = rig.block_hash(latest_valid_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The block before the fork should become the head. - assert_eq!(rig.head_info().block_root, latest_valid_root); + assert_eq!(rig.harness.head_block_root(), latest_valid_root); // The fork block should be invalidated, even though it's not an ancestor of the block that // triggered the INVALID response from the EL. @@ -677,19 +704,19 @@ fn invalidates_all_descendants() { } /// Check that the head will switch after the canonical branch is invalidated. -#[test] -fn switches_heads() { +#[tokio::test] +async fn switches_heads() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. 
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -700,23 +727,29 @@ fn switches_heads() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block)) + .await + .unwrap(); + rig.recompute_head().await; let latest_valid_slot = fork_parent_slot; let latest_valid_hash = rig.block_hash(fork_parent_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The fork block should become the head. - assert_eq!(rig.head_info().block_root, fork_block_root); + assert_eq!(rig.harness.head_block_root(), fork_block_root); // The fork block has not yet been validated. assert!(rig.execution_status(fork_block_root).is_optimistic()); @@ -746,17 +779,18 @@ fn switches_heads() { } } -#[test] -fn invalid_during_processing() { +#[tokio::test] +async fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); let roots = &[ - rig.import_block(Payload::Valid), + rig.import_block(Payload::Valid).await, rig.import_block(Payload::Invalid { latest_valid_hash: None, - }), - rig.import_block(Payload::Valid), + }) + .await, + rig.import_block(Payload::Valid).await, ]; // 0 should be present in the chain. @@ -772,20 +806,20 @@ fn invalid_during_processing() { None ); // 2 should be the head. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head_block_root = rig.harness.head_block_root(); + assert_eq!(head_block_root, roots[2]); } -#[test] -fn invalid_after_optimistic_sync() { +#[tokio::test] +async fn invalid_after_optimistic_sync() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. let mut roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; for root in &roots { @@ -793,29 +827,32 @@ fn invalid_after_optimistic_sync() { } // 2 should be the head. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[2]); - roots.push(rig.import_block(Payload::Invalid { - latest_valid_hash: Some(rig.block_hash(roots[1])), - })); + roots.push( + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(rig.block_hash(roots[1])), + }) + .await, + ); // Running fork choice is necessary since a block has been invalidated. - rig.fork_choice(); + rig.recompute_head().await; // 1 should be the head, since 2 was invalidated. 
- let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[1]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[1]); } -#[test] -fn manually_validate_child() { +#[tokio::test] +async fn manually_validate_child() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; assert!(rig.execution_status(parent).is_optimistic()); assert!(rig.execution_status(child).is_optimistic()); @@ -826,14 +863,14 @@ fn manually_validate_child() { assert!(rig.execution_status(child).is_valid_and_post_bellatrix()); } -#[test] -fn manually_validate_parent() { +#[tokio::test] +async fn manually_validate_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; assert!(rig.execution_status(parent).is_optimistic()); assert!(rig.execution_status(child).is_optimistic()); @@ -844,14 +881,14 @@ fn manually_validate_parent() { assert!(rig.execution_status(child).is_optimistic()); } -#[test] -fn payload_preparation() { +#[tokio::test] +async fn payload_preparation() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let current_slot = rig.harness.chain.slot().unwrap(); assert_eq!(head.beacon_state.slot(), 1); assert_eq!(current_slot, 1); @@ -865,18 +902,19 @@ fn payload_preparation() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. - el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(1), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(rig.harness.chain.slot().unwrap()) + .await .unwrap(); let payload_attributes = PayloadAttributes { @@ -896,15 +934,15 @@ fn payload_preparation() { assert_eq!(rig.previous_payload_attributes(), payload_attributes); } -#[test] -fn invalid_parent() { +#[tokio::test] +async fn invalid_parent() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. // Import a syncing block atop the transition block (we'll call this the "parent block" since we // build another block on it later). 
- let parent_root = rig.import_block(Payload::Syncing); + let parent_root = rig.import_block(Payload::Syncing).await; let parent_block = rig.harness.get_block(parent_root.into()).unwrap(); let parent_state = rig .harness @@ -914,34 +952,34 @@ fn invalid_parent() { // Produce another block atop the parent, but don't import yet. let slot = parent_block.slot() + 1; rig.harness.set_current_slot(slot); - let (block, state) = rig.harness.make_block(parent_state, slot); + let (block, state) = rig.harness.make_block(parent_state, slot).await; + let block = Arc::new(block); let block_root = block.canonical_root(); assert_eq!(block.parent_root(), parent_root); // Invalidate the parent block. - rig.invalidate_manually(parent_root); + rig.invalidate_manually(parent_root).await; assert!(rig.execution_status(parent_root).is_invalid()); // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.verify_block_for_gossip(block.clone()), + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.clone()), + rig.harness.chain.process_block(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload cannot be imported to fork choice. - let (block, _block_signature) = block.deconstruct(); assert!(matches!( - rig.harness.chain.fork_choice.write().on_block( + rig.harness.chain.canonical_head.fork_choice_write_lock().on_block( slot, - &block, + block.message(), block_root, Duration::from_secs(0), &state, @@ -960,21 +998,21 @@ fn invalid_parent() { } /// Tests to ensure that we will still send a proposer preparation -#[test] -fn payload_preparation_before_transition_block() { +#[tokio::test] +async fn payload_preparation_before_transition_block() { let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); - let head_info = rig.head_info(); - assert!( - !head_info.is_merge_transition_complete, - "the head block is pre-transition" - ); + let head = rig.harness.chain.head_snapshot(); assert_eq!( - head_info.execution_payload_block_hash, - Some(ExecutionBlockHash::zero()), - "the head block is post-bellatrix" + head.beacon_block + .message() + .body() + .execution_payload() + .unwrap() + .block_hash(), + ExecutionBlockHash::zero(), + "the head block is post-bellatrix but pre-transition" ); let current_slot = rig.harness.chain.slot().unwrap(); @@ -986,24 +1024,32 @@ fn payload_preparation_before_transition_block() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. 
- el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(0), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.move_to_terminal_block(); rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(current_slot) + .await .unwrap(); + let forkchoice_update_params = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_forkchoice_update_parameters(); rig.harness .chain - .update_execution_engine_forkchoice_blocking(current_slot) + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await .unwrap(); let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); @@ -1012,15 +1058,15 @@ fn payload_preparation_before_transition_block() { assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } -#[test] -fn attesting_to_optimistic_head() { +#[tokio::test] +async fn attesting_to_optimistic_head() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let root = rig.import_block(Payload::Syncing); + let root = rig.import_block(Payload::Syncing).await; - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let slot = head.beacon_block.slot(); assert_eq!( head.beacon_block_root, root, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 771295c415e..560e865a8f2 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -72,18 +72,20 @@ fn get_harness( harness } -#[test] -fn full_participation_no_skips() { +#[tokio::test] +async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store); @@ -91,8 +93,8 @@ fn full_participation_no_skips() { check_iterators(&harness); } -#[test] -fn randomised_skips() { +#[tokio::test] +async fn randomised_skips() { let num_slots = E::slots_per_epoch() * 5; let mut num_blocks_produced = 0; let db_path = tempdir().unwrap(); @@ -104,14 +106,16 @@ fn randomised_skips() { for slot in 1..=num_slots { if rng.gen_bool(0.8) { - harness.extend_chain( - 1, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(head_slot), - first_slot: Slot::new(slot), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(head_slot), + first_slot: Slot::new(slot), + }, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); num_blocks_produced += 1; head_slot = slot; @@ -120,7 +124,7 @@ fn randomised_skips() { } } - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -133,8 +137,8 @@ fn randomised_skips() { check_iterators(&harness); } 
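The hunks above are the template for every test file touched by this commit: `#[test]` becomes `#[tokio::test]`, the test function becomes `async`, each harness call that now reaches async machinery (block production, the execution layer, the head lock) gains `.await`, and blocks move behind `Arc` so the verification pipeline can share them without deep clones. A minimal sketch of the shape, assuming only a `tokio` dev-dependency with the `macros` and `rt` features; `Harness` and `Block` are stand-ins, not the real test-harness API:

```rust
use std::sync::Arc;

struct Block;
struct Harness;

impl Harness {
    // Stand-in for a harness method that is now async end-to-end.
    async fn process_block(&self, _block: Arc<Block>) -> Result<(), ()> {
        Ok(())
    }
}

// Previously: `#[test] fn migrated() { harness.process_block(block).unwrap(); }`
#[tokio::test]
async fn migrated() {
    let harness = Harness;
    let block = Arc::new(Block); // blocks are shared via `Arc`, not moved by value
    harness.process_block(block).await.unwrap();
}
```

The same mechanical rewrite explains the multi-line reflow seen throughout the diff: once a builder call like `extend_chain` ends in `.await`, rustfmt breaks the chain across lines.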
-#[test] -fn long_skip() { +#[tokio::test] +async fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -148,11 +152,13 @@ fn long_skip() { // Having this set lower ensures that we start justifying and finalizing quickly after a skip. let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2; - harness.extend_chain( - initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks); @@ -162,14 +168,16 @@ fn long_skip() { } // 3. Produce more blocks, establish a new finalized epoch - harness.extend_chain( - final_blocks as usize, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + final_blocks as usize, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(initial_blocks), + first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + }, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks + skip_slots + final_blocks); check_split_slot(&harness, store); @@ -183,8 +191,8 @@ fn long_skip() { /// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value /// 2. We correctly load the genesis value for all required slots /// NOTE: this test takes about a minute to run -#[test] -fn randao_genesis_storage() { +#[tokio::test] +async fn randao_genesis_storage() { let validator_count = 8; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -195,24 +203,24 @@ fn randao_genesis_storage() { // Check we have a non-trivial genesis value let genesis_value = *harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .get_randao_mix(Epoch::new(0)) .expect("randao mix ok"); assert!(!genesis_value.is_zero()); - harness.extend_chain( - num_slots as usize - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_slots as usize - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Check that genesis value is still present assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -221,15 +229,16 @@ fn randao_genesis_storage() { // Then upon adding one more block, it isn't harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -243,8 +252,8 @@ fn randao_genesis_storage() { } // Check that closing and reopening a freezer DB restores the split slot to its correct value. 
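Some context for the check below: the store keeps unfinalized data in a hot database and finalized history in the freezer, and the split slot records the boundary between the two, so it has to be persisted and read back across a restart. A rough sketch of that round-trip property, where `Store` and `open_store` are hypothetical stand-ins; only the idea of `get_split_slot` mirrors the real API used in this file:

```rust
// Hypothetical stand-ins for the on-disk store; illustrative only.
struct Store {
    split_slot: u64,
}

fn open_store(db_file: &std::path::Path) -> Store {
    // Pretend to read the persisted split slot back from disk.
    let split_slot = std::fs::read_to_string(db_file)
        .ok()
        .and_then(|s| s.trim().parse().ok())
        .unwrap_or(0);
    Store { split_slot }
}

#[test]
fn split_slot_survives_reopen() {
    let db_file = std::env::temp_dir().join("split_slot_sketch");
    std::fs::write(&db_file, "128").unwrap(); // "close" the DB with split slot 128
    let reopened = open_store(&db_file); // reopen from the same path
    assert_eq!(reopened.split_slot, 128);
}
```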
-#[test] -fn split_slot_restore() { +#[tokio::test] +async fn split_slot_restore() { let db_path = tempdir().unwrap(); let split_slot = { @@ -253,11 +262,13 @@ fn split_slot_restore() { let num_blocks = 4 * E::slots_per_epoch(); - harness.extend_chain( - num_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; store.get_split_slot() }; @@ -272,8 +283,8 @@ fn split_slot_restore() { // Check attestation processing and `load_epoch_boundary_state` in the presence of a split DB. // This is a bit of a monster test in that it tests lots of different things, but until they're // tested elsewhere, this is as good a place as any. -#[test] -fn epoch_boundary_state_attestation_processing() { +#[tokio::test] +async fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -285,13 +296,15 @@ fn epoch_boundary_state_attestation_processing() { let mut late_attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(timely_validators.clone()), - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(timely_validators.clone()), + ) + .await; - let head = harness.chain.head().expect("head ok"); + let head = harness.chain.head_snapshot(); late_attestations.extend(harness.get_unaggregated_attestations( &AttestationStrategy::SomeValidators(late_validators.clone()), &head.beacon_state, @@ -328,12 +341,7 @@ fn epoch_boundary_state_attestation_processing() { assert_eq!(epoch_boundary_state, ebs_of_ebs); // If the attestation is pre-finalization it should be rejected. - let finalized_epoch = harness - .chain - .head_info() - .expect("should get head") - .finalized_checkpoint - .epoch; + let finalized_epoch = harness.finalized_checkpoint().epoch; let res = harness .chain @@ -364,8 +372,8 @@ fn epoch_boundary_state_attestation_processing() { } // Test that the `end_slot` for forwards block and state root iterators works correctly. 
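Expanding on the comment above: the forwards iterators yield root/slot pairs from the start of the chain, and the `until` variants must stop exactly at the caller's `end_slot` rather than running on to the head. A toy model of that clamping property, assuming inclusive-end semantics as the test below exercises them; plain slices stand in for the store-backed iterators:

```rust
// Toy model: `roots` is the run of (slot, root) pairs a store iterator
// would yield; `until` keeps everything up to and including `end_slot`.
fn forwards_until(roots: &[(u64, u64)], end_slot: u64) -> Vec<(u64, u64)> {
    roots
        .iter()
        .copied()
        .take_while(|(slot, _)| *slot <= end_slot)
        .collect()
}

#[test]
fn clamps_at_end_slot() {
    let roots = [(0, 100), (1, 101), (2, 102), (3, 103)];
    assert_eq!(forwards_until(&roots, 2), vec![(0, 100), (1, 101), (2, 102)]);
}
```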
-#[test] -fn forwards_iter_block_and_state_roots_until() { +#[tokio::test] +async fn forwards_iter_block_and_state_roots_until() { let num_blocks_produced = E::slots_per_epoch() * 17; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -373,13 +381,14 @@ fn forwards_iter_block_and_state_roots_until() { let all_validators = &harness.get_all_validators(); let (mut head_state, mut head_state_root) = harness.get_current_state_and_root(); - let head_block_root = harness.chain.head_info().unwrap().block_root; + let head_block_root = harness.head_block_root(); let mut block_roots = vec![head_block_root]; let mut state_roots = vec![head_state_root]; for slot in (1..=num_blocks_produced).map(Slot::from) { let (block_root, mut state) = harness .add_attested_block_at_slot(slot, head_state, head_state_root, all_validators) + .await .unwrap(); head_state_root = state.update_tree_hash_cache().unwrap(); head_state = state; @@ -429,19 +438,21 @@ fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[test] -fn block_replay_with_inaccurate_state_roots() { +#[tokio::test] +async fn block_replay_with_inaccurate_state_roots() { let num_blocks_produced = E::slots_per_epoch() * 3 + 31; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let chain = &harness.chain; - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Slot must not be 0 mod 32 or else no blocks will be replayed. let (mut head_state, head_root) = harness.get_current_state_and_root(); @@ -471,8 +482,8 @@ fn block_replay_with_inaccurate_state_roots() { ); } -#[test] -fn block_replayer_hooks() { +#[tokio::test] +async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -487,12 +498,9 @@ fn block_replayer_hooks() { let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - let (_, _, end_block_root, mut end_state) = harness.add_attested_blocks_at_slots( - state.clone(), - state_root, - &block_slots, - &all_validators, - ); + let (_, _, end_block_root, mut end_state) = harness + .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) + .await; let blocks = store .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) @@ -548,8 +556,8 @@ fn block_replayer_hooks() { assert_eq!(end_state, replay_state); } -#[test] -fn delete_blocks_and_states() { +#[tokio::test] +async fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let validators_keypairs = @@ -567,7 +575,9 @@ fn delete_blocks_and_states() { let initial_slots: Vec = (1..=unforked_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; // Create a fork post-finalization. 
let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; @@ -587,20 +597,21 @@ fn delete_blocks_and_states() { let fork1_state = harness.get_current_state(); let fork2_state = fork1_state.clone(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, honest_validators), - (fork2_state, fork2_slots, faulty_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, honest_validators), + (fork2_state, fork2_slots, faulty_validators), + ]) + .await; let honest_head = results[0].2; let faulty_head = results[1].2; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let head_info = harness.chain.head_info().expect("should get head"); - assert_eq!(head_info.slot, unforked_blocks + fork_blocks); + assert_eq!(harness.head_slot(), unforked_blocks + fork_blocks); assert_eq!( - head_info.block_root, + harness.head_block_root(), honest_head.into(), "the honest chain should be the canonical chain", ); @@ -671,7 +682,7 @@ fn delete_blocks_and_states() { // Check that we never produce invalid blocks when there is deep forking that changes the shuffling. // See https://github.com/sigp/lighthouse/issues/845 -fn multi_epoch_fork_valid_blocks_test( +async fn multi_epoch_fork_valid_blocks_test( initial_blocks: usize, num_fork1_blocks_: usize, num_fork2_blocks_: usize, @@ -696,7 +707,9 @@ fn multi_epoch_fork_valid_blocks_test( let initial_slots: Vec = (1..=initial_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; } assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); @@ -714,10 +727,12 @@ fn multi_epoch_fork_valid_blocks_test( .map(Into::into) .collect(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, fork1_validators), - (fork2_state, fork2_slots, fork2_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, fork1_validators), + (fork2_state, fork2_slots, fork2_validators), + ]) + .await; let head1 = results[0].2; let head2 = results[1].2; @@ -726,43 +741,47 @@ fn multi_epoch_fork_valid_blocks_test( } // This is the minimal test of block production with different shufflings. -#[test] -fn block_production_different_shuffling_early() { +#[tokio::test] +async fn block_production_different_shuffling_early() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( slots_per_epoch - 2, slots_per_epoch + 3, slots_per_epoch + 3, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } -#[test] -fn block_production_different_shuffling_long() { +#[tokio::test] +async fn block_production_different_shuffling_long() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } // Check that the op pool safely includes multiple attestations per block when necessary. // This checks the correctness of the shuffling compatibility memoization. 
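On the comment above: an on-chain attestation aggregates signatures within a single committee, so once the validator count pushes `committees_per_slot` above one, fully attesting a slot requires at least that many attestations in the next block; that product is what the test below asserts. The arithmetic, for illustration only:

```rust
// Each packed attestation covers one (slot, committee) pair, so full
// coverage of `slots` attested slots needs this many entries in a block.
fn min_attestations_for_full_coverage(slots: usize, committees_per_slot: usize) -> usize {
    slots * committees_per_slot
}

#[test]
fn attestation_packing_arithmetic() {
    // Illustrative numbers: one fully-attested slot, two committees per slot.
    assert_eq!(min_attestations_for_full_coverage(1, 2), 2);
}
```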
-#[test] -fn multiple_attestations_per_block() { +#[tokio::test] +async fn multiple_attestations_per_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store, HIGH_VALIDATOR_COUNT); - harness.extend_chain( - E::slots_per_epoch() as usize * 3, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize * 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot()) @@ -774,6 +793,8 @@ fn multiple_attestations_per_block() { assert_eq!( snapshot .beacon_block + .as_ref() + .clone() .deconstruct() .0 .body() @@ -784,18 +805,20 @@ fn multiple_attestations_per_block() { } } -#[test] -fn shuffling_compatible_linear_chain() { +#[tokio::test] +async fn shuffling_compatible_linear_chain() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. - let head_block_root = harness.extend_chain( - 4 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 4 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -808,25 +831,29 @@ fn shuffling_compatible_linear_chain() { ); } -#[test] -fn shuffling_compatible_missing_pivot_block() { +#[tokio::test] +async fn shuffling_compatible_missing_pivot_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. 
- harness.extend_chain( - E::slots_per_epoch() as usize - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize - 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); harness.advance_slot(); - let head_block_root = harness.extend_chain( - 2 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 2 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -839,15 +866,16 @@ fn shuffling_compatible_missing_pivot_block() { ); } -#[test] -fn shuffling_compatible_simple_fork() { +#[tokio::test] +async fn shuffling_compatible_simple_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -860,15 +888,16 @@ fn shuffling_compatible_simple_fork() { drop(db_path); } -#[test] -fn shuffling_compatible_short_fork() { +#[tokio::test] +async fn shuffling_compatible_short_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, slots_per_epoch + 2, slots_per_epoch + 2, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -973,8 +1002,8 @@ fn check_shuffling_compatible( } // Ensure blocks from abandoned forks are pruned from the Hot DB -#[test] -fn prunes_abandoned_fork_between_two_finalized_checkpoints() { +#[tokio::test] +async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -997,7 +1026,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { state_root, &canonical_chain_slots, &honest_validators, - ); + ) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1005,12 +1035,14 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let (current_state, current_state_root) = rig.get_current_state_and_root(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - current_state, - current_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + current_state, + current_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known for &block_hash in stray_blocks.values() { @@ -1040,12 +1072,9 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks_post_finalization, _, _, _) = rig + 
.add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postcondition: New blocks got finalized assert_eq!( @@ -1083,8 +1112,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { assert!(!rig.chain.knows_head(&stray_head)); } -#[test] -fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { +#[tokio::test] +async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1103,12 +1132,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { // Fill up 0th epoch let canonical_chain_slots_zeroth_epoch: Vec = (1..rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (_, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &canonical_chain_slots_zeroth_epoch, - &honest_validators, - ); + let (_, _, _, mut state) = rig + .add_attested_blocks_at_slots( + state, + state_root, + &canonical_chain_slots_zeroth_epoch, + &honest_validators, + ) + .await; // Fill up 1st epoch let canonical_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) @@ -1122,7 +1153,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { state_root, &canonical_chain_slots_first_epoch, &honest_validators, - ); + ) + .await; let canonical_chain_slot: u64 = rig.get_current_slot().into(); let stray_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 2 @@ -1130,12 +1162,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &stray_chain_slots_first_epoch, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &stray_chain_slots_first_epoch, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1169,12 +1203,9 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1213,8 +1244,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(get_blocks(&chain_dump).contains(&shared_head)); } -#[test] -fn pruning_does_not_touch_blocks_prior_to_finalization() { +#[tokio::test] +async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 16; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1232,12 +1263,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_chain_blocks, _, _, new_state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - 
&honest_validators, - ); + let (canonical_chain_blocks, _, _, new_state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1246,12 +1274,14 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &first_epoch_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &first_epoch_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1279,8 +1309,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, _, _) = - rig.add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators); + let (_, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1308,8 +1339,8 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { assert!(rig.chain.knows_head(&stray_head)); } -#[test] -fn prunes_fork_growing_past_youngest_finalized_checkpoint() { +#[tokio::test] +async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1326,12 +1357,9 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - &honest_validators, - ); + let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; // Fill up 1st epoch. Contains a fork. let slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2)) @@ -1344,9 +1372,11 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { state_root, &slots_first_epoch, &adversarial_validators, - ); - let (canonical_blocks_first_epoch, _, _, mut canonical_state) = - rig.add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators); + ) + .await; + let (canonical_blocks_first_epoch, _, _, mut canonical_state) = rig + .add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators) + .await; // Fill up 2nd epoch. Extends both the canonical chain and the fork. 
let stray_slots_second_epoch: Vec = (rig.epoch_start_slot(2) @@ -1360,7 +1390,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { stray_state_root, &stray_slots_second_epoch, &adversarial_validators, - ); + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known let stray_blocks: HashMap = stray_blocks_first_epoch @@ -1400,12 +1431,14 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1451,8 +1484,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { } // This is to check if state outside of normal block processing are pruned correctly. -#[test] -fn prunes_skipped_slots_states() { +#[tokio::test] +async fn prunes_skipped_slots_states() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1475,7 +1508,8 @@ fn prunes_skipped_slots_states() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into(); @@ -1483,12 +1517,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1526,12 +1562,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1575,8 +1613,8 @@ fn prunes_skipped_slots_states() { } // This is to check if state outside of normal block processing are pruned correctly. 
-#[test] -fn finalizes_non_epoch_start_slot() { +#[tokio::test] +async fn finalizes_non_epoch_start_slot() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1599,7 +1637,8 @@ fn finalizes_non_epoch_start_slot() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = rig.epoch_start_slot(1).into(); @@ -1607,12 +1646,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1650,12 +1691,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1759,14 +1802,14 @@ fn check_no_blocks_exist<'a>( } } -#[test] -fn prune_single_block_fork() { +#[tokio::test] +async fn prune_single_block_fork() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1); + pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1).await; } -#[test] -fn prune_single_block_long_skip() { +#[tokio::test] +async fn prune_single_block_long_skip() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( 2 * slots_per_epoch, @@ -1774,11 +1817,12 @@ fn prune_single_block_long_skip() { 2 * slots_per_epoch, 2 * slots_per_epoch as u64, 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_mid_epoch() { +#[tokio::test] +async fn prune_shared_skip_states_mid_epoch() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( slots_per_epoch + slots_per_epoch / 2, @@ -1786,39 +1830,43 @@ fn prune_shared_skip_states_mid_epoch() { slots_per_epoch, 2, slots_per_epoch - 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_epoch_boundaries() { +#[tokio::test] +async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch); - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch); + pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; + pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; 
pruning_test( 2 * slots_per_epoch - 1, slots_per_epoch as u64, 1, 0, 2 * slots_per_epoch, - ); + ) + .await; } /// Generic harness for pruning tests. -fn pruning_test( +async fn pruning_test( // Number of blocks to start the chain with before forking. num_initial_blocks: u64, // Number of skip slots on the main chain after the initial blocks. @@ -1850,30 +1898,34 @@ fn pruning_test( let start_slot = Slot::new(1); let divergence_slot = start_slot + num_initial_blocks; let (state, state_root) = harness.get_current_state_and_root(); - let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots( - state, - state_root, - &slots(start_slot, num_initial_blocks)[..], - &honest_validators, - ); - - let mut chains = harness.add_blocks_on_multiple_chains(vec![ - // Canonical chain - ( - divergence_state.clone(), - slots( - divergence_slot + num_canonical_skips, - num_canonical_middle_blocks, + let (_, _, _, divergence_state) = harness + .add_attested_blocks_at_slots( + state, + state_root, + &slots(start_slot, num_initial_blocks)[..], + &honest_validators, + ) + .await; + + let mut chains = harness + .add_blocks_on_multiple_chains(vec![ + // Canonical chain + ( + divergence_state.clone(), + slots( + divergence_slot + num_canonical_skips, + num_canonical_middle_blocks, + ), + honest_validators.clone(), + ), + // Fork chain + ( + divergence_state.clone(), + slots(divergence_slot + num_fork_skips, num_fork_blocks), + faulty_validators, ), - honest_validators.clone(), - ), - // Fork chain - ( - divergence_state.clone(), - slots(divergence_slot + num_fork_skips, num_fork_blocks), - faulty_validators, - ), - ]); + ]) + .await; let (_, _, _, mut canonical_state) = chains.remove(0); let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0); @@ -1899,20 +1951,19 @@ fn pruning_test( let num_finalization_blocks = 4 * E::slots_per_epoch(); let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks; let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - harness.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &slots(canonical_slot, num_finalization_blocks), - &honest_validators, - ); + harness + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &slots(canonical_slot, num_finalization_blocks), + &honest_validators, + ) + .await; // Check that finalization has advanced past the divergence slot. assert!( harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()) > divergence_slot @@ -1940,43 +1991,48 @@ fn garbage_collect_temp_states_from_failed_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let slots_per_epoch = E::slots_per_epoch(); - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot); - - let (mut block, _) = signed_block.deconstruct(); - - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, - ); + // Use a `block_on_dangerous` rather than an async test to stop spawned processes from holding + // a reference to the store. 
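This is the one test that is not converted to `#[tokio::test]`: driving the async body with `block_on_dangerous` guarantees that everything spawned inside, together with its reference to the store, is dropped before the database is reopened. In outline (condensed from the code that follows):

    // Run the async body on the chain's own executor; when it returns, the
    // moved-in `harness` is dropped, releasing its handle on the store.
    harness.chain.task_executor.clone().block_on_dangerous(
        async move { /* ... build and reject the invalid block ... */ },
        "test",
    );
    // Reopening the store should then garbage collect the temporary states.
    let store = get_store(&db_path);
    assert_eq!(store.iter_temporary_state_roots().count(), 0);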
+ harness.chain.task_executor.clone().block_on_dangerous( + async move { + let slots_per_epoch = E::slots_per_epoch(); + + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; + + let (mut block, _) = signed_block.deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + ); - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness.process_block_result(block).unwrap_err(); + // The block should be rejected, but should store a bunch of temporary states. + harness.set_current_slot(block_slot); + harness.process_block_result(block).await.unwrap_err(); - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + }, + "test", ); - drop(harness); - drop(store); - // On startup, the store should garbage collect all the temporary states. let store = get_store(&db_path); assert_eq!(store.iter_temporary_state_roots().count(), 0); } -#[test] -fn weak_subjectivity_sync() { +#[tokio::test] +async fn weak_subjectivity_sync() { // Build an initial chain on one harness, representing a synced node with full history. let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; @@ -1985,17 +2041,19 @@ fn weak_subjectivity_sync() { let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let genesis_state = full_store .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) .unwrap() .unwrap(); - let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint; + let wss_checkpoint = harness.finalized_checkpoint(); let wss_block = harness .chain .store @@ -2010,11 +2068,13 @@ fn weak_subjectivity_sync() { // Add more blocks that advance finalization further. 
harness.advance_slot(); - harness.extend_chain( - num_final_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); let log = test_logger(); @@ -2028,6 +2088,7 @@ fn weak_subjectivity_sync() { BeaconChainBuilder::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) .unwrap() .logger(log.clone()) @@ -2058,12 +2119,15 @@ fn weak_subjectivity_sync() { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, block.clone()) + .make_full_block(&snapshot.beacon_block_root, block.as_ref().clone()) .unwrap(); beacon_chain.slot_clock.set_slot(block.slot().as_u64()); - beacon_chain.process_block(full_block).unwrap(); - beacon_chain.fork_choice().unwrap(); + beacon_chain + .process_block(Arc::new(full_block)) + .await + .unwrap(); + beacon_chain.recompute_head_at_current_slot().await.unwrap(); // Check that the new block's state can be loaded correctly. let state_root = block.state_root(); @@ -2157,8 +2221,8 @@ fn weak_subjectivity_sync() { assert_eq!(store.get_anchor_slot(), None); } -#[test] -fn finalizes_after_resuming_from_db() { +#[tokio::test] +async fn finalizes_after_resuming_from_db() { let validator_count = 16; let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; let first_half = num_blocks_produced / 2; @@ -2175,17 +2239,18 @@ fn finalizes_after_resuming_from_db() { harness.advance_slot(); - harness.extend_chain( - first_half as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + first_half as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!( harness .chain - .head() - .expect("should read head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2227,17 +2292,15 @@ fn finalizes_after_resuming_from_db() { .slot_clock .set_slot(latest_slot.as_u64() + 1); - resumed_harness.extend_chain( - (num_blocks_produced - first_half) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + resumed_harness + .extend_chain( + (num_blocks_produced - first_half) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &resumed_harness - .chain - .head() - .expect("should read head") - .beacon_state; + let state = &resumed_harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), num_blocks_produced, @@ -2260,8 +2323,8 @@ fn finalizes_after_resuming_from_db() { ); } -#[test] -fn revert_minority_fork_on_resume() { +#[tokio::test] +async fn revert_minority_fork_on_resume() { let validator_count = 16; let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); @@ -2317,17 +2380,17 @@ fn revert_minority_fork_on_resume() { harness1.process_attestations(attestations.clone()); harness2.process_attestations(attestations); - let (block, new_state) = harness1.make_block(state, slot); + let (block, new_state) = harness1.make_block(state, slot).await; - harness1.process_block(slot, block.clone()).unwrap(); - harness2.process_block(slot, block.clone()).unwrap(); + harness1.process_block(slot, block.clone()).await.unwrap(); + harness2.process_block(slot, 
block.clone()).await.unwrap(); state = new_state; block_root = block.canonical_root(); } - assert_eq!(harness1.chain.head_info().unwrap().slot, fork_slot - 1); - assert_eq!(harness2.chain.head_info().unwrap().slot, fork_slot - 1); + assert_eq!(harness1.head_slot(), fork_slot - 1); + assert_eq!(harness2.head_slot(), fork_slot - 1); // Fork the two chains. let mut state1 = state.clone(); @@ -2352,13 +2415,13 @@ fn revert_minority_fork_on_resume() { harness2.process_attestations(attestations); // Minority chain block (no attesters). - let (block1, new_state1) = harness1.make_block(state1, slot); - harness1.process_block(slot, block1).unwrap(); + let (block1, new_state1) = harness1.make_block(state1, slot).await; + harness1.process_block(slot, block1).await.unwrap(); state1 = new_state1; // Majority chain block (all attesters). - let (block2, new_state2) = harness2.make_block(state2, slot); - harness2.process_block(slot, block2.clone()).unwrap(); + let (block2, new_state2) = harness2.make_block(state2, slot).await; + harness2.process_block(slot, block2.clone()).await.unwrap(); state2 = new_state2; block_root = block2.canonical_root(); @@ -2367,8 +2430,8 @@ fn revert_minority_fork_on_resume() { } let end_slot = fork_slot + post_fork_blocks - 1; - assert_eq!(harness1.chain.head_info().unwrap().slot, end_slot); - assert_eq!(harness2.chain.head_info().unwrap().slot, end_slot); + assert_eq!(harness1.head_slot(), end_slot); + assert_eq!(harness2.head_slot(), end_slot); // Resume from disk with the hard-fork activated: this should revert the post-fork blocks. // We have to do some hackery with the `slot_clock` so that the correct slot is set when @@ -2396,24 +2459,35 @@ fn revert_minority_fork_on_resume() { .build(); // Head should now be just before the fork. - resumed_harness.chain.fork_choice().unwrap(); - let head = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head.slot, fork_slot - 1); + resumed_harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + assert_eq!(resumed_harness.head_slot(), fork_slot - 1); // Head track should know the canonical head and the rogue head. assert_eq!(resumed_harness.chain.heads().len(), 2); - assert!(resumed_harness.chain.knows_head(&head.block_root.into())); + assert!(resumed_harness + .chain + .knows_head(&resumed_harness.head_block_root().into())); // Apply blocks from the majority chain and trigger finalization. let initial_split_slot = resumed_harness.chain.store.get_split_slot(); for block in &majority_blocks { - resumed_harness.process_block_result(block.clone()).unwrap(); + resumed_harness + .process_block_result(block.clone()) + .await + .unwrap(); // The canonical head should be the block from the majority chain. 
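Head recomputation is now explicit and async: the old `chain.fork_choice()` call becomes `chain.recompute_head_at_current_slot().await`, and ad-hoc `head_info()` reads give way to harness helpers such as `head_slot()` and `head_block_root()`. The replacement pattern, condensed from the hunks in this region:

    // Old: chain.fork_choice().unwrap(); let head = chain.head_info().unwrap();
    resumed_harness
        .chain
        .recompute_head_at_current_slot()
        .await
        .unwrap();
    assert_eq!(resumed_harness.head_slot(), block.slot());
    assert_eq!(resumed_harness.head_block_root(), block.canonical_root());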
- resumed_harness.chain.fork_choice().unwrap(); - let head_info = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head_info.slot, block.slot()); - assert_eq!(head_info.block_root, block.canonical_root()); + resumed_harness + .chain + .recompute_head_at_current_slot() + .await + .unwrap(); + assert_eq!(resumed_harness.head_slot(), block.slot()); + assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); } let advanced_split_slot = resumed_harness.chain.store.get_split_slot(); @@ -2432,10 +2506,22 @@ fn revert_minority_fork_on_resume() { fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { assert_eq!(a.spec, b.spec, "spec should be equal"); assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); + let a_head = a.head_snapshot(); + let b_head = b.head_snapshot(); assert_eq!( - a.head().unwrap(), - b.head().unwrap(), - "head() should be equal" + a_head.beacon_block_root, b_head.beacon_block_root, + "head block roots should be equal" + ); + assert_eq!( + a_head.beacon_block, b_head.beacon_block, + "head blocks should be equal" + ); + // Clone with committee caches only to prevent other caches from messing with the equality + // check. + assert_eq!( + a_head.beacon_state.clone_with_only_committee_caches(), + b_head.beacon_state.clone_with_only_committee_caches(), + "head states should be equal" ); assert_eq!(a.heads(), b.heads(), "heads() should be equal"); assert_eq!( @@ -2446,15 +2532,21 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b let slot = a.slot().unwrap(); let spec = T::EthSpec::default_spec(); assert!( - a.fork_choice.write().get_head(slot, &spec).unwrap() - == b.fork_choice.write().get_head(slot, &spec).unwrap(), + a.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap() + == b.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap(), "fork_choice heads should be equal" ); } /// Check that the head state's slot matches `expected_slot`. fn check_slot(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -2465,7 +2557,7 @@ fn check_slot(harness: &TestHarness, expected_slot: u64) { /// Check that the chain has finalized under best-case assumptions, and check the head slot. fn check_finalization(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; check_slot(harness, expected_slot); @@ -2487,8 +2579,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L assert_eq!( harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2575,10 +2666,7 @@ fn check_iterators(harness: &TestHarness) { max_slot = Some(slot); } // Assert that we reached the head. - assert_eq!( - max_slot, - Some(harness.chain.head_info().expect("should get head").slot) - ); + assert_eq!(max_slot, Some(harness.head_slot())); // Assert that the block root iterator reaches the head. 
assert_eq!( harness @@ -2588,7 +2676,7 @@ fn check_iterators(harness: &TestHarness) { .last() .map(Result::unwrap) .map(|(_, slot)| slot), - Some(harness.chain.head_info().expect("should get head").slot) + Some(harness.head_slot()) ); } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 626c132d69e..1e51b0ffb9b 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -46,15 +46,8 @@ fn get_valid_sync_committee_message( slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_state = harness.chain.head_beacon_state_cloned(); + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let (signature, _) = harness .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) @@ -77,16 +70,9 @@ fn get_valid_sync_contribution( harness: &BeaconChainHarness>, relative_sync_committee: RelativeSyncCommittee, ) -> (SignedContributionAndProof, usize, SecretKey) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); + let head_state = harness.chain.head_beacon_state_cloned(); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let sync_contributions = harness.make_sync_contributions( &head_state, head_block_root, @@ -116,7 +102,7 @@ fn get_non_aggregator( harness: &BeaconChainHarness>, slot: Slot, ) -> (usize, SecretKey) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; let sync_subcommittee_size = E::sync_committee_size() .safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize) .expect("should determine sync subcommittee size"); @@ -162,17 +148,19 @@ fn get_non_aggregator( } /// Tests verification of `SignedContributionAndProof` from the gossip network. 
-#[test]
-fn aggregated_gossip_verification() {
+#[tokio::test]
+async fn aggregated_gossip_verification() {
     let harness = get_harness(VALIDATOR_COUNT);
     let state = harness.get_current_state();
 
-    harness.add_attested_blocks_at_slots(
-        state,
-        Hash256::zero(),
-        &[Slot::new(1), Slot::new(2)],
-        (0..VALIDATOR_COUNT).collect::<Vec<_>>().as_slice(),
-    );
+    harness
+        .add_attested_blocks_at_slots(
+            state,
+            Hash256::zero(),
+            &[Slot::new(1), Slot::new(2)],
+            (0..VALIDATOR_COUNT).collect::<Vec<_>>().as_slice(),
+        )
+        .await;
 
     let current_slot = harness.chain.slot().expect("should get slot");
 
@@ -406,7 +394,7 @@ fn aggregated_gossip_verification() {
             valid_aggregate.message.contribution.clone(),
             None,
             &non_aggregator_sk,
-            &harness.chain.head_info().expect("should get head info").fork,
+            &harness.chain.canonical_head.cached_head().head_fork(),
             harness.chain.genesis_validators_root,
             &harness.chain.spec,
         )
@@ -474,6 +462,7 @@ fn aggregated_gossip_verification() {
 
     harness
         .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[])
+        .await
        .expect("should add block");
 
     // **Incorrectly** create a sync contribution using the current sync committee
@@ -488,17 +477,19 @@ fn aggregated_gossip_verification() {
 }
 
 /// Tests the verification conditions for sync committee messages on the gossip network.
-#[test]
-fn unaggregated_gossip_verification() {
+#[tokio::test]
+async fn unaggregated_gossip_verification() {
     let harness = get_harness(VALIDATOR_COUNT);
     let state = harness.get_current_state();
 
-    harness.add_attested_blocks_at_slots(
-        state,
-        Hash256::zero(),
-        &[Slot::new(1), Slot::new(2)],
-        (0..VALIDATOR_COUNT).collect::<Vec<_>>().as_slice(),
-    );
+    harness
+        .add_attested_blocks_at_slots(
+            state,
+            Hash256::zero(),
+            &[Slot::new(1), Slot::new(2)],
+            (0..VALIDATOR_COUNT).collect::<Vec<_>>().as_slice(),
+        )
+        .await;
 
     let current_slot = harness.chain.slot().expect("should get slot");
 
@@ -648,6 +639,7 @@ fn unaggregated_gossip_verification() {
 
     harness
         .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[])
+        .await
         .expect("should add block");
 
     // **Incorrectly** create a sync message using the current sync committee
diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs
index 7b17937a210..f98580db3fe 100644
--- a/beacon_node/beacon_chain/tests/tests.rs
+++ b/beacon_node/beacon_chain/tests/tests.rs
@@ -6,14 +6,16 @@ use beacon_chain::{
         AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
         OP_POOL_DB_KEY,
     },
-    StateSkipConfig, WhenSlotSkipped,
+    BeaconChain, StateSkipConfig, WhenSlotSkipped,
 };
 use lazy_static::lazy_static;
 use operation_pool::PersistedOperationPool;
 use state_processing::{
     per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError,
 };
-use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot};
+use types::{
+    BeaconState, BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot,
+};
 
 // Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24; @@ -40,7 +42,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness = harness .chain @@ -122,7 +126,7 @@ fn iterators() { ) }); - let head = &harness.chain.head().expect("should get head"); + let head = harness.chain.head_snapshot(); assert_eq!( *block_roots.last().expect("should have some block roots"), @@ -137,20 +141,44 @@ fn iterators() { ); } -#[test] -fn find_reorgs() { +fn find_reorg_slot( + chain: &BeaconChain>, + new_state: &BeaconState, + new_block_root: Hash256, +) -> Slot { + let (old_state, old_block_root) = { + let head = chain.canonical_head.cached_head(); + let old_state = head.snapshot.beacon_state.clone(); + let old_block_root = head.head_block_root(); + (old_state, old_block_root) + }; + beacon_chain::canonical_head::find_reorg_slot( + &old_state, + old_block_root, + new_state, + new_block_root, + &chain.spec, + ) + .unwrap() +} + +#[tokio::test] +async fn find_reorgs() { let num_blocks_produced = MinimalEthSpec::slots_per_historical_root() + 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - // No need to produce attestations for this test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + // No need to produce attestations for this test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head_state = harness.chain.head_beacon_state().unwrap(); + let head = harness.chain.head_snapshot(); + let head_state = &head.beacon_state; let head_slot = head_state.slot(); let genesis_state = harness .chain @@ -160,10 +188,11 @@ fn find_reorgs() { // because genesis is more than `SLOTS_PER_HISTORICAL_ROOT` away, this should return with the // finalized slot. assert_eq!( - harness - .chain - .find_reorg_slot(&genesis_state, harness.chain.genesis_block_root) - .unwrap(), + find_reorg_slot( + &harness.chain, + &genesis_state, + harness.chain.genesis_block_root + ), head_state .finalized_checkpoint() .epoch @@ -172,13 +201,11 @@ fn find_reorgs() { // test head assert_eq!( - harness - .chain - .find_reorg_slot( - &head_state, - harness.chain.head_beacon_block().unwrap().canonical_root() - ) - .unwrap(), + find_reorg_slot( + &harness.chain, + &head_state, + harness.chain.head_beacon_block().canonical_root() + ), head_slot ); @@ -194,16 +221,13 @@ fn find_reorgs() { .unwrap() .unwrap(); assert_eq!( - harness - .chain - .find_reorg_slot(&prev_state, prev_block_root) - .unwrap(), + find_reorg_slot(&harness.chain, &prev_state, prev_block_root), prev_slot ); } -#[test] -fn chooses_fork() { +#[tokio::test] +async fn chooses_fork() { let harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; @@ -217,22 +241,27 @@ fn chooses_fork() { let faulty_fork_blocks = delay + 2; // Build an initial chain where all validators agree. 
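Note that `find_reorg_slot` is no longer a method on `BeaconChain`: the test file defines the local helper above, which clones the cached head's state and block root and delegates to the free function `beacon_chain::canonical_head::find_reorg_slot`. Call sites shrink accordingly, for example:

    // Compare an older (state, block_root) pair against the current head.
    assert_eq!(
        find_reorg_slot(&harness.chain, &prev_state, prev_block_root),
        prev_slot
    );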
- harness.extend_chain( - initial_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); - - let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( - &honest_validators, - &faulty_validators, - honest_fork_blocks, - faulty_fork_blocks, - ); + harness + .extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let (honest_head, faulty_head) = harness + .generate_two_forks_by_skipping_a_block( + &honest_validators, + &faulty_validators, + honest_fork_blocks, + faulty_fork_blocks, + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -241,29 +270,28 @@ fn chooses_fork() { ); assert_eq!( - harness - .chain - .head() - .expect("should get head") - .beacon_block_root, + harness.chain.head_snapshot().beacon_block_root, honest_head, "the honest chain should be the canonical chain" ); } -#[test] -fn finalizes_with_full_participation() { +#[tokio::test] +async fn finalizes_with_full_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -287,8 +315,8 @@ fn finalizes_with_full_participation() { ); } -#[test] -fn finalizes_with_two_thirds_participation() { +#[tokio::test] +async fn finalizes_with_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -296,13 +324,16 @@ fn finalizes_with_two_thirds_participation() { let two_thirds = (VALIDATOR_COUNT / 3) * 2; let attesters = (0..two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -331,8 +362,8 @@ fn finalizes_with_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_with_less_than_two_thirds_participation() { +#[tokio::test] +async fn does_not_finalize_with_less_than_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -341,13 +372,16 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { let less_than_two_thirds = two_thirds - 1; let attesters = (0..less_than_two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + 
AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -371,19 +405,22 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_without_attestation() { +#[tokio::test] +async fn does_not_finalize_without_attestation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -407,18 +444,20 @@ fn does_not_finalize_without_attestation() { ); } -#[test] -fn roundtrip_operation_pool() { +#[tokio::test] +async fn roundtrip_operation_pool() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); // Add some attestations - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness.chain.op_pool.num_attestations() > 0); // TODO: could add some other operations @@ -439,20 +478,23 @@ fn roundtrip_operation_pool() { assert_eq!(harness.chain.op_pool, restored_op_pool); } -#[test] -fn unaggregated_attestations_added_to_fork_choice_some_none() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_some_none() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); @@ -493,8 +535,8 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() { } } -#[test] -fn attestations_with_increasing_slots() { +#[tokio::test] +async fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -502,14 +544,16 @@ fn attestations_with_increasing_slots() { let mut attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - // Don't produce & include any attestations (we'll collect them later). - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + // Don't produce & include any attestations (we'll collect them later). 
+ AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let head_state_root = head.beacon_state_root(); attestations.extend(harness.get_unaggregated_attestations( @@ -548,20 +592,23 @@ fn attestations_with_increasing_slots() { } } -#[test] -fn unaggregated_attestations_added_to_fork_choice_all_updated() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); @@ -605,7 +652,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() { } } -fn run_skip_slot_test(skip_slots: u64) { +async fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; let harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); @@ -615,83 +662,60 @@ fn run_skip_slot_test(skip_slots: u64) { harness_b.advance_slot(); } - harness_a.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - // No attestation required for test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness_a + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; assert_eq!( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_a.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(0) ); assert_eq!( harness_b .chain - .process_block( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .clone(), - ) + .process_block(harness_a.chain.head_snapshot().beacon_block.clone()) + .await .unwrap(), - harness_a - .chain - .head() - .expect("should get head") - .beacon_block_root + harness_a.chain.head_snapshot().beacon_block_root ); harness_b .chain - .fork_choice() + .recompute_head_at_current_slot() + .await .expect("should run fork choice"); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); } -#[test] -fn produces_and_processes_with_genesis_skip_slots() { +#[tokio::test] +async fn produces_and_processes_with_genesis_skip_slots() { for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { - run_skip_slot_test(i) + run_skip_slot_test(i).await } } -#[test] -fn block_roots_skip_slot_behaviour() { +#[tokio::test] +async fn block_roots_skip_slot_behaviour() { let harness = get_harness(VALIDATOR_COUNT); // Test should be longer than the block roots to ensure a DB lookup is triggered. 
let chain_length = harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .block_roots() .len() as u64 @@ -708,11 +732,13 @@ fn block_roots_skip_slot_behaviour() { let slot = harness.chain.slot().unwrap().as_u64(); if !skipped_slots.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } } @@ -820,7 +846,7 @@ fn block_roots_skip_slot_behaviour() { let future_slot = harness.chain.slot().unwrap() + 1; assert_eq!( - harness.chain.head().unwrap().beacon_block.slot(), + harness.chain.head_snapshot().beacon_block.slot(), future_slot - 2, "test precondition" ); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 95ba1b56578..b7f06183f10 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -684,26 +684,20 @@ where if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { // Only send a head update *after* genesis. if let Ok(current_slot) = beacon_chain.slot() { - let head = beacon_chain - .head_info() - .map_err(|e| format!("Unable to read beacon chain head: {:?}", e))?; - - // Issue the head to the execution engine on startup. This ensures it can start - // syncing. - if head - .execution_payload_block_hash - .map_or(false, |h| h != ExecutionBlockHash::zero()) + let params = beacon_chain + .canonical_head + .cached_head() + .forkchoice_update_parameters(); + if params + .head_hash + .map_or(false, |hash| hash != ExecutionBlockHash::zero()) { - // Spawn a new task using the "async" fork choice update method, rather than - // using the "blocking" method. - // - // Using the blocking method may cause a panic if this code is run inside an - // async context. + // Spawn a new task to update the EE without waiting for it to complete. let inner_chain = beacon_chain.clone(); runtime_context.executor.spawn( async move { let result = inner_chain - .update_execution_engine_forkchoice_async(current_slot) + .update_execution_engine_forkchoice(current_slot, params) .await; // No need to exit early if setting the head fails. 
It will be set again if/when the @@ -811,8 +805,16 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); + let inner_spec = spec.clone(); let schema_upgrade = |db, from, to| { - migrate_schema::>(db, datadir, from, to, log) + migrate_schema::>( + db, + datadir, + from, + to, + log, + &inner_spec, + ) }; let store = HotColdDB::open( diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 22c3bfcb3a8..9476819a4b3 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,5 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus}; +use beacon_chain::{BeaconChain, BeaconChainTypes, ExecutionStatus}; use lighthouse_network::{types::SyncState, NetworkGlobals}; use parking_lot::Mutex; use slog::{crit, debug, error, info, warn, Logger}; @@ -100,15 +100,10 @@ pub fn spawn_notifier( current_sync_state = sync_state; } - let head_info = match beacon_chain.head_info() { - Ok(head_info) => head_info, - Err(e) => { - error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e)); - break; - } - }; - - let head_slot = head_info.slot; + let cached_head = beacon_chain.canonical_head.cached_head(); + let head_slot = cached_head.head_slot(); + let head_root = cached_head.head_block_root(); + let finalized_checkpoint = cached_head.finalized_checkpoint(); metrics::set_gauge(&metrics::NOTIFIER_HEAD_SLOT, head_slot.as_u64() as i64); @@ -125,9 +120,6 @@ pub fn spawn_notifier( }; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let finalized_epoch = head_info.finalized_checkpoint.epoch; - let finalized_root = head_info.finalized_checkpoint.root; - let head_root = head_info.block_root; // The default is for regular sync but this gets modified if backfill sync is in // progress. 
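The notifier mirrors the same head-API migration: instead of the fallible `beacon_chain.head_info()` (whose failure used to break out of the notifier loop), it takes one coherent read of the cached head. The accessors, exactly as used above:

    // A single consistent view of the canonical head; no Result to unwrap.
    let cached_head = beacon_chain.canonical_head.cached_head();
    let head_slot = cached_head.head_slot();
    let head_root = cached_head.head_block_root();
    let finalized_checkpoint = cached_head.finalized_checkpoint();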
@@ -177,8 +169,8 @@ pub fn spawn_notifier( log, "Slot timer"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_block" => format!("{}", head_root), "head_slot" => head_slot, "current_slot" => current_slot, @@ -264,35 +256,29 @@ pub fn spawn_notifier( head_root.to_string() }; - let block_hash = match beacon_chain.head_safety_status() { - Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt - .map(|hash| format!("{} (verified)", hash)) - .unwrap_or_else(|| "n/a".to_string()), - Ok(HeadSafetyStatus::Unsafe(block_hash)) => { + let block_hash = match beacon_chain.canonical_head.head_execution_status() { + Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(), + Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), + Ok(ExecutionStatus::Optimistic(hash)) => { warn!( log, - "Head execution payload is unverified"; - "execution_block_hash" => ?block_hash, + "Head is optimistic"; + "info" => "chain not fully verified, \ + block and attestation production disabled until execution engine syncs", + "execution_block_hash" => ?hash, ); - format!("{} (unverified)", block_hash) + format!("{} (unverified)", hash) } - Ok(HeadSafetyStatus::Invalid(block_hash)) => { + Ok(ExecutionStatus::Invalid(hash)) => { crit!( log, "Head execution payload is invalid"; "msg" => "this scenario may be unrecoverable", - "execution_block_hash" => ?block_hash, - ); - format!("{} (invalid)", block_hash) - } - Err(e) => { - error!( - log, - "Failed to read head safety status"; - "error" => ?e + "execution_block_hash" => ?hash, ); - "n/a".to_string() + format!("{} (invalid)", hash) } + Err(_) => "unknown".to_string(), }; info!( @@ -300,8 +286,8 @@ pub fn spawn_notifier( "Synced"; "peers" => peer_count_pretty(connected_peer_count), "exec_hash" => block_hash, - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "epoch" => current_epoch, "block" => block_info, "slot" => current_slot, @@ -312,8 +298,8 @@ pub fn spawn_notifier( log, "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_slot" => head_slot, "current_slot" => current_slot, ); @@ -332,57 +318,52 @@ pub fn spawn_notifier( fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); - if let Ok(head_info) = beacon_chain.head_info() { - // Perform some logging about the eth1 chain - if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { - // No need to do logging if using the dummy backend. - if eth1_chain.is_dummy_backend() { - return; - } + // Perform some logging about the eth1 chain + if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { + // No need to do logging if using the dummy backend. 
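`HeadSafetyStatus` is gone; the notifier now reads fork choice's `ExecutionStatus` for the head block via `canonical_head.head_execution_status()`. Per the hunk above: `Irrelevant` (pre-merge) renders as "n/a", `Valid` as verified, `Optimistic` warns that block and attestation production stay disabled until the execution engine syncs, and `Invalid` is flagged as potentially unrecoverable. A compressed sketch of the same match, with the log calls omitted:

    let block_hash = match beacon_chain.canonical_head.head_execution_status() {
        Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(),
        Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash),
        Ok(ExecutionStatus::Optimistic(hash)) => format!("{} (unverified)", hash),
        Ok(ExecutionStatus::Invalid(hash)) => format!("{} (invalid)", hash),
        Err(_) => "unknown".to_string(),
    };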
+ if eth1_chain.is_dummy_backend() { + return; + } - if let Some(status) = - eth1_chain.sync_status(head_info.genesis_time, current_slot_opt, &beacon_chain.spec) - { - debug!( - log, - "Eth1 cache sync status"; - "eth1_head_block" => status.head_block_number, - "latest_cached_block_number" => status.latest_cached_block_number, - "latest_cached_timestamp" => status.latest_cached_block_timestamp, - "voting_target_timestamp" => status.voting_target_timestamp, - "ready" => status.lighthouse_is_cached_and_ready - ); + if let Some(status) = eth1_chain.sync_status( + beacon_chain.genesis_time, + current_slot_opt, + &beacon_chain.spec, + ) { + debug!( + log, + "Eth1 cache sync status"; + "eth1_head_block" => status.head_block_number, + "latest_cached_block_number" => status.latest_cached_block_number, + "latest_cached_timestamp" => status.latest_cached_block_timestamp, + "voting_target_timestamp" => status.voting_target_timestamp, + "ready" => status.lighthouse_is_cached_and_ready + ); - if !status.lighthouse_is_cached_and_ready { - let voting_target_timestamp = status.voting_target_timestamp; + if !status.lighthouse_is_cached_and_ready { + let voting_target_timestamp = status.voting_target_timestamp; - let distance = status - .latest_cached_block_timestamp - .map(|latest| { - voting_target_timestamp.saturating_sub(latest) - / beacon_chain.spec.seconds_per_eth1_block - }) - .map(|distance| distance.to_string()) - .unwrap_or_else(|| "initializing deposits".to_string()); + let distance = status + .latest_cached_block_timestamp + .map(|latest| { + voting_target_timestamp.saturating_sub(latest) + / beacon_chain.spec.seconds_per_eth1_block + }) + .map(|distance| distance.to_string()) + .unwrap_or_else(|| "initializing deposits".to_string()); - warn!( - log, - "Syncing eth1 block cache"; - "est_blocks_remaining" => distance, - ); - } - } else { - error!( + warn!( log, - "Unable to determine eth1 sync status"; + "Syncing eth1 block cache"; + "est_blocks_remaining" => distance, ); } + } else { + error!( + log, + "Unable to determine eth1 sync status"; + ); } - } else { - error!( - log, - "Unable to get head info"; - ); } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 156382c4812..61f1c569d43 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -274,31 +274,6 @@ impl ExecutionLayer { self.inner.execution_engine_forkchoice_lock.lock().await } - /// Convenience function to allow calling async functions in a non-async context. - pub fn block_on<'a, F, U, V>(&'a self, generate_future: F) -> Result - where - F: Fn(&'a Self) -> U, - U: Future>, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - runtime.block_on(generate_future(self)) - } - - /// Convenience function to allow calling async functions in a non-async context. - /// - /// The function is "generic" since it does not enforce a particular return type on - /// `generate_future`. - pub fn block_on_generic<'a, F, U, V>(&'a self, generate_future: F) -> Result - where - F: Fn(&'a Self) -> U, - U: Future, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - Ok(runtime.block_on(generate_future(self))) - } - /// Convenience function to allow spawning a task without waiting for the result. 
pub fn spawn(&self, generate_future: F, name: &'static str) where @@ -431,18 +406,6 @@ impl ExecutionLayer { self.engines().is_synced().await } - /// Updates the proposer preparation data provided by validators - pub fn update_proposer_preparation_blocking( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) -> Result<(), Error> { - self.block_on_generic(|_| async move { - self.update_proposer_preparation(update_epoch, preparation_data) - .await - }) - } - /// Updates the proposer preparation data provided by validators pub async fn update_proposer_preparation( &self, diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 9207067e33d..35a35bcb74f 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -58,12 +58,10 @@ fn cached_attestation_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let head = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_block_root = chain.canonical_head.cached_head().head_block_root(); - let (duties, dependent_root) = chain - .validator_attestation_duties(request_indices, request_epoch, head.block_root) + let (duties, dependent_root, _execution_status) = chain + .validator_attestation_duties(request_indices, request_epoch, head_block_root) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(duties, request_indices, dependent_root, chain) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 727215bfcad..73f50985bdf 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,6 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; use std::str::FromStr; +use std::sync::Arc; use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given @@ -23,19 +24,18 @@ impl BlockId { chain: &BeaconChain, ) -> Result { match &self.0 { - CoreBlockId::Head => chain - .head_info() - .map(|head| head.block_root) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.canonical_head.cached_head().head_block_root()), CoreBlockId::Genesis => Ok(chain.genesis_block_root), - CoreBlockId::Finalized => chain - .head_info() - .map(|head| head.finalized_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), - CoreBlockId::Justified => chain - .head_info() - .map(|head| head.current_justified_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Finalized => Ok(chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .root), + CoreBlockId::Justified => Ok(chain + .canonical_head + .cached_head() + .justified_checkpoint() + .root), CoreBlockId::Slot(slot) => chain .block_root_at_slot(*slot, WhenSlotSkipped::None) .map_err(warp_utils::reject::beacon_chain_error) @@ -57,10 +57,7 @@ impl BlockId { chain: &BeaconChain, ) -> Result>, warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map(Into::into) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.head_beacon_block().clone_as_blinded()), CoreBlockId::Slot(slot) => { let root = self.root(chain)?; chain @@ -103,11 +100,9 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result, warp::Rejection> { + ) -> Result>, 
warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => Ok(chain.head_beacon_block()), CoreBlockId::Slot(slot) => { let root = self.root(chain)?; chain @@ -122,7 +117,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok(Arc::new(block)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -136,8 +131,8 @@ impl BlockId { .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { + .and_then(|block_opt| { + block_opt.map(Arc::new).ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", root diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 014db8a6027..645c19c40e5 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -22,7 +22,7 @@ pub fn info( pub fn historical_blocks( chain: Arc>, - blocks: Vec>, + blocks: Vec>>, ) -> Result { chain .import_historical_block_batch(blocks) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 06dc9687648..ff4d46efcb4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -23,7 +23,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, validator_monitor::{get_block_delay_ms, timestamp_now}, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - HeadSafetyStatus, ProduceBlockVerification, WhenSlotSkipped, + ProduceBlockVerification, WhenSlotSkipped, }; use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -369,9 +369,7 @@ pub fn serve( chain: Arc>| async move { match *network_globals.sync_state.read() { SyncState::SyncingFinalized { .. } => { - let head_slot = chain - .best_slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { @@ -404,35 +402,6 @@ pub fn serve( ) .untuple_one(); - // Create a `warp` filter that rejects requests unless the head has been verified by the - // execution layer. - let only_with_safe_head = warp::any() - .and(chain_filter.clone()) - .and_then(move |chain: Arc>| async move { - let status = chain.head_safety_status().map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to read head safety status: {:?}", - e - )) - })?; - match status { - HeadSafetyStatus::Safe(_) => Ok(()), - HeadSafetyStatus::Unsafe(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "optimistic head hash {:?} has not been verified by the execution layer", - hash - ))) - } - HeadSafetyStatus::Invalid(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "the head block has an invalid payload {:?}, this may be unrecoverable", - hash - ))) - } - } - }) - .untuple_one(); - // Create a `warp` filter that provides access to the logger. 
let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); @@ -451,15 +420,12 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|head| api_types::GenesisData { - genesis_time: head.genesis_time, - genesis_validators_root: head.genesis_validators_root, - genesis_fork_version: chain.spec.genesis_fork_version, - }) - .map(api_types::GenericResponse::from) + let genesis_data = api_types::GenesisData { + genesis_time: chain.genesis_time, + genesis_validators_root: chain.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }; + Ok(api_types::GenericResponse::from(genesis_data)) }) }); @@ -835,10 +801,10 @@ pub fn serve( blocking_json_task(move || { let (root, block) = match (query.slot, query.parent_root) { // No query parameters, return the canonical head block. - (None, None) => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|block| (block.canonical_root(), block.into()))?, + (None, None) => { + let block = chain.head_beacon_block(); + (block.canonical_root(), block.clone_as_blinded()) + } // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; @@ -945,93 +911,85 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock, + |block: Arc>, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| { - blocking_json_task(move || { - let seen_timestamp = timestamp_now(); + log: Logger| async move { + let seen_timestamp = timestamp_now(); + + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message(&network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + + match chain.process_block(block.clone()).await { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(block.clone())), - )?; + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); - // Determine the delay after the start of the slot, register it with metrics. - let delay = - get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration( - &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, - delay, - ); + // Update the head since it's likely this block will become the new + // head. + chain + .recompute_head_at_current_slot() + .await + .map_err(warp_utils::reject::beacon_chain_error)?; - match chain.process_block(block.clone()) { - Ok(root) => { - info!( + // Perform some logging to inform users if their blocks are being produced + // late. 
+ // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; - - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); + "root" => ?root, + ) + } else if delay >= error_threshold { error!( log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) } + + Ok(warp::reply::json(&())) } - }) + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } }, ); @@ -1049,99 +1007,90 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock>, + |block: Arc>>, chain: Arc>, network_tx: UnboundedSender>, - _log: Logger| { - blocking_json_task(move || { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). - - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. 
This flow should happen if the
- // execution layer has no payload builders or if we have not yet finalized post-merge transition.
-                        let payload = el
-                            .block_on(|el| el.propose_blinded_beacon_block(&block))
-                            .map_err(|e| {
-                                warp_utils::reject::custom_server_error(format!(
-                                    "proposal failed: {:?}",
-                                    e
-                                ))
-                            })?;
-                        let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge {
-                            message: BeaconBlockMerge {
-                                slot: block.message().slot(),
-                                proposer_index: block.message().proposer_index(),
-                                parent_root: block.message().parent_root(),
-                                state_root: block.message().state_root(),
-                                body: BeaconBlockBodyMerge {
-                                    randao_reveal: block.message().body().randao_reveal().clone(),
-                                    eth1_data: block.message().body().eth1_data().clone(),
-                                    graffiti: *block.message().body().graffiti(),
-                                    proposer_slashings: block
-                                        .message()
-                                        .body()
-                                        .proposer_slashings()
-                                        .clone(),
-                                    attester_slashings: block
-                                        .message()
-                                        .body()
-                                        .attester_slashings()
-                                        .clone(),
-                                    attestations: block.message().body().attestations().clone(),
-                                    deposits: block.message().body().deposits().clone(),
-                                    voluntary_exits: block
-                                        .message()
-                                        .body()
-                                        .voluntary_exits()
-                                        .clone(),
-                                    sync_aggregate: block
-                                        .message()
-                                        .body()
-                                        .sync_aggregate()
-                                        .unwrap()
-                                        .clone(),
-                                    execution_payload: payload.into(),
-                                },
+             _log: Logger| async move {
+                if let Some(el) = chain.execution_layer.as_ref() {
+                    //FIXME(sean): we may not always receive the payload in this response because it
+                    // should be the relay's job to propagate the block. However, since this block is
+                    // already signed and sent this might be ok (so long as the relay validates
+                    // the block before revealing the payload).
+
+                    //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should
+                    // be able to support the normal block proposal flow, because at some point full block endpoints
+                    // will be deprecated from the beacon API. This will entail creating full blocks in
+                    // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded
+                    // blocks. We will access the payload of those blocks here. This flow should happen if the
+                    // execution layer has no payload builders or if we have not yet finalized post-merge transition.
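+                    // Request the full execution payload corresponding to this
+                    // blinded block from the execution layer; the full block is
+                    // reconstructed from it below before being published on gossip.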
+ let payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e)) + })?; + let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { + message: BeaconBlockMerge { + slot: block.message().slot(), + proposer_index: block.message().proposer_index(), + parent_root: block.message().parent_root(), + state_root: block.message().state_root(), + body: BeaconBlockBodyMerge { + randao_reveal: block.message().body().randao_reveal().clone(), + eth1_data: block.message().body().eth1_data().clone(), + graffiti: *block.message().body().graffiti(), + proposer_slashings: block + .message() + .body() + .proposer_slashings() + .clone(), + attester_slashings: block + .message() + .body() + .attester_slashings() + .clone(), + attestations: block.message().body().attestations().clone(), + deposits: block.message().body().deposits().clone(), + voluntary_exits: block.message().body().voluntary_exits().clone(), + sync_aggregate: block + .message() + .body() + .sync_aggregate() + .unwrap() + .clone(), + execution_payload: payload.into(), }, - signature: block.signature().clone(), - }); + }, + signature: block.signature().clone(), + }); + let new_block = Arc::new(new_block); - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(new_block.clone())), - )?; + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(new_block.clone()), + )?; - match chain.process_block(new_block) { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; + match chain.process_block(new_block).await { + Ok(_) => { + // Update the head since it's likely this block will become the new + // head. 
+ chain + .recompute_head_at_current_slot() + .await + .map_err(warp_utils::reject::beacon_chain_error)?; - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); + Ok(warp::reply::json(&())) + } + Err(e) => { + let msg = format!("{:?}", e); - Err(warp_utils::reject::broadcast_without_import(msg)) - } + Err(warp_utils::reject::broadcast_without_import(msg)) } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) } - }) + } else { + Err(warp_utils::reject::custom_server_error( + "no execution layer found".to_string(), + )) + } }, ); @@ -1401,9 +1350,7 @@ pub fn serve( )), )?; - chain - .import_attester_slashing(slashing) - .map_err(warp_utils::reject::beacon_chain_error)?; + chain.import_attester_slashing(slashing); } Ok(()) @@ -1744,10 +1691,7 @@ pub fn serve( .and_then( |network_globals: Arc>, chain: Arc>| { blocking_json_task(move || { - let head_slot = chain - .head_info() - .map(|info| info.slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to read slot clock".into()) })?; @@ -1993,48 +1937,49 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); @@ -2055,48 +2000,48 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: 
api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); @@ -2107,7 +2052,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { @@ -2140,7 +2084,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { @@ -2217,7 +2160,6 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head) .and(chain_filter.clone()) .and_then( |sync_committee_data: SyncContributionData, chain: Arc>| { @@ -2413,40 +2355,39 @@ pub fn serve( .and_then( |chain: Arc>, log: Logger, - preparation_data: Vec| { - blocking_json_task(move || { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + preparation_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; - debug!( - log, - "Received proposer preparation data"; - "count" => 
preparation_data.len(), - ); + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - execution_layer - .update_proposer_preparation_blocking(current_epoch, &preparation_data) - .map_err(|_e| { - warp_utils::reject::custom_bad_request( - "error processing proposer preparations".to_string(), - ) - })?; + debug!( + log, + "Received proposer preparation data"; + "count" => preparation_data.len(), + ); + + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; - chain.prepare_beacon_proposer_blocking().map_err(|e| { + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { warp_utils::reject::custom_bad_request(format!( "error updating proposer preparations: {:?}", e )) })?; - Ok(()) - }) + Ok::<_, warp::reject::Rejection>(warp::reply::json(&())) }, ); @@ -2461,69 +2402,66 @@ pub fn serve( .and_then( |chain: Arc>, log: Logger, - register_val_data: Vec| { - blocking_json_task(move || { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)? - .epoch(T::EthSpec::slots_per_epoch()); + register_val_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - debug!( - log, - "Received register validator request"; - "count" => register_val_data.len(), - ); + debug!( + log, + "Received register validator request"; + "count" => register_val_data.len(), + ); - let preparation_data = register_val_data - .iter() - .filter_map(|register_data| { - chain - .validator_index(®ister_data.message.pubkey) - .ok() - .flatten() - .map(|validator_index| ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data.message.fee_recipient, - }) - }) - .collect::>(); + let preparation_data = register_val_data + .iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .map(|validator_index| ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data.message.fee_recipient, + }) + }) + .collect::>(); - debug!( - log, - "Resolved validator request pubkeys"; - "count" => preparation_data.len() - ); + debug!( + log, + "Resolved validator request pubkeys"; + "count" => preparation_data.len() + ); - // Update the prepare beacon proposer cache based on this request. - execution_layer - .update_proposer_preparation_blocking(current_epoch, &preparation_data) - .map_err(|_e| { - warp_utils::reject::custom_bad_request( - "error processing proposer preparations".to_string(), - ) - })?; + // Update the prepare beacon proposer cache based on this request. 
+                execution_layer
+                    .update_proposer_preparation(current_epoch, &preparation_data)
+                    .await;

-                    // Call prepare beacon proposer blocking with the latest update in order to make
-                    // sure we have a local payload to fall back to in the event of the blined block
-                    // flow failing.
-                    chain.prepare_beacon_proposer_blocking().map_err(|e| {
+                // Call prepare beacon proposer with the latest update in order to make
+                // sure we have a local payload to fall back to in the event of the blinded block
+                // flow failing.
+                chain
+                    .prepare_beacon_proposer(current_slot)
+                    .await
+                    .map_err(|e| {
                         warp_utils::reject::custom_bad_request(format!(
                             "error updating proposer preparations: {:?}",
                             e
                         ))
                     })?;

-                    //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder
+                //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder

-                    Ok(())
-                })
+                Ok::<_, warp::Rejection>(warp::reply::json(&()))
             },
         );

     // POST validator/sync_committee_subscriptions
@@ -2689,7 +2627,11 @@ pub fn serve(
         .and_then(|chain: Arc<BeaconChain<T>>| {
             blocking_task(move || {
                 Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from(
-                    chain.fork_choice.read().proto_array().core_proto_array(),
+                    chain
+                        .canonical_head
+                        .fork_choice_read_lock()
+                        .proto_array()
+                        .core_proto_array(),
                 )))
             })
         });
@@ -2732,9 +2674,6 @@ pub fn serve(
         .and(chain_filter.clone())
         .and_then(|chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let head_info = chain
-                    .head_info()
-                    .map_err(warp_utils::reject::beacon_chain_error)?;
                 let current_slot_opt = chain.slot().ok();

                 chain
@@ -2746,7 +2685,7 @@
                     )
                 })
                 .and_then(|eth1| {
-                    eth1.sync_status(head_info.genesis_time, current_slot_opt, &chain.spec)
+                    eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec)
                         .ok_or_else(|| {
                             warp_utils::reject::custom_server_error(
                                 "Unable to determine Eth1 sync status".to_string(),
@@ -2869,7 +2808,7 @@
         .and(chain_filter.clone())
         .and(log_filter.clone())
         .and_then(
-            |blocks: Vec<SignedBeaconBlock<T::EthSpec>>,
+            |blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
             chain: Arc<BeaconChain<T>>,
             log: Logger| {
                 info!(
diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs
index b040eec779b..bddae555499 100644
--- a/beacon_node/http_api/src/proposer_duties.rs
+++ b/beacon_node/http_api/src/proposer_duties.rs
@@ -55,7 +55,7 @@ pub fn proposer_duties(
         .safe_add(1)
         .map_err(warp_utils::reject::arith_error)?
{ - let (proposers, dependent_root, _) = + let (proposers, dependent_root, _execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; convert_to_api_response(chain, request_epoch, dependent_root, proposers) @@ -88,16 +88,23 @@ fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { - let head = chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error)?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + let (head_slot, head_block_root, head_decision_root) = { + let head = chain.canonical_head.cached_head(); + let head_block_root = head.head_block_root(); + let decision_root = head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; + (head.head_slot(), head_block_root, decision_root) + }; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch - Ordering::Equal => head.proposer_shuffling_decision_root, + Ordering::Equal => head_decision_root, // head_epoch < request_epoch - Ordering::Less => head.block_root, + Ordering::Less => head_block_root, // head_epoch > request_epoch Ordering::Greater => { return Err(warp_utils::reject::custom_server_error(format!( @@ -132,8 +139,9 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, fork) = compute_proposer_duties_from_head(current_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + let (indices, dependent_root, _execution_status, fork) = + compute_proposer_duties_from_head(current_epoch, chain) + .map_err(warp_utils::reject::beacon_chain_error)?; // Prime the proposer shuffling cache with the newly-learned value. 
chain diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 95c049d9979..8604c918991 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -18,27 +18,23 @@ impl StateId { chain: &BeaconChain, ) -> Result { let slot = match &self.0 { - CoreStateId::Head => { - return chain - .head_info() - .map(|head| head.state_root) - .map_err(warp_utils::reject::beacon_chain_error) - } + CoreStateId::Head => return Ok(chain.canonical_head.cached_head().head_state_root()), CoreStateId::Genesis => return Ok(chain.genesis_state_root), - CoreStateId::Finalized => chain.head_info().map(|head| { - head.finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Justified => chain.head_info().map(|head| { - head.current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Slot(slot) => Ok(*slot), + CoreStateId::Finalized => chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + CoreStateId::Justified => chain + .canonical_head + .cached_head() + .justified_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + CoreStateId::Slot(slot) => *slot, CoreStateId::Root(root) => return Ok(*root), - } - .map_err(warp_utils::reject::beacon_chain_error)?; + }; chain .state_root_at_slot(slot) @@ -62,11 +58,7 @@ impl StateId { chain: &BeaconChain, ) -> Result, warp::Rejection> { let (state_root, slot_opt) = match &self.0 { - CoreStateId::Head => { - return chain - .head_beacon_state() - .map_err(warp_utils::reject::beacon_chain_error) - } + CoreStateId::Head => return Ok(chain.head_beacon_state_cloned()), CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), _ => (self.root(chain)?, None), }; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6b4f79fa5d5..942a1167c2f 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -45,6 +45,7 @@ async fn sync_committee_duties_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -61,6 +62,7 @@ async fn sync_committee_duties_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators) + .await .unwrap(); assert_eq!( @@ -244,6 +246,7 @@ async fn sync_committee_indices_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -277,6 +280,7 @@ async fn sync_committee_indices_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators) + .await .unwrap(); let current_period = fork_epoch.sync_committee_period(&spec).unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 8b12aa4a5b2..3327093d097 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -47,11 +47,13 @@ pub async fn fork_choice_before_proposal() { // Create some chain depth. 
harness.advance_slot(); - harness.extend_chain( - num_initial as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // We set up the following block graph, where B is a block that is temporarily orphaned by C, // but is then reinstated and built upon by D. @@ -64,8 +66,8 @@ pub async fn fork_choice_before_proposal() { let slot_d = slot_a + 3; let state_a = harness.get_current_state(); - let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b); - let block_root_b = harness.process_block(slot_b, block_b).unwrap(); + let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; + let block_root_b = harness.process_block(slot_b, block_b).await.unwrap(); // Create attestations to B but keep them in reserve until after C has been processed. let attestations_b = harness.make_attestations( @@ -76,8 +78,11 @@ pub async fn fork_choice_before_proposal() { slot_b, ); - let (block_c, state_c) = harness.make_block(state_a, slot_c); - let block_root_c = harness.process_block(slot_c, block_c.clone()).unwrap(); + let (block_c, state_c) = harness.make_block(state_a, slot_c).await; + let block_root_c = harness + .process_block(slot_c, block_c.clone()) + .await + .unwrap(); // Create attestations to C from a small number of validators and process them immediately. let attestations_c = harness.make_attestations( @@ -94,7 +99,7 @@ pub async fn fork_choice_before_proposal() { // Due to proposer boost, the head should be C during slot C. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_c.into() ); @@ -102,7 +107,7 @@ pub async fn fork_choice_before_proposal() { // Manually prod the per-slot task, because the slot timer doesn't run in the background in // these tests. harness.advance_slot(); - harness.chain.per_slot_task(); + harness.chain.per_slot_task().await; let proposer_index = state_b .get_beacon_proposer_index(slot_d, &harness.chain.spec) @@ -119,7 +124,7 @@ pub async fn fork_choice_before_proposal() { // Head is now B. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_b.into() ); // D's parent is B. 
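The `tests.rs` changes below follow the same mechanical rule as the `interactive_tests.rs` hunks above: the harness's block production and processing helpers are now `async` and must be awaited. A minimal usage sketch (variable names are illustrative, not from the diff):

```rust
// The harness helpers now return futures, so tests await them.
let (block, state) = harness.make_block(parent_state.clone(), slot).await;
let block_root = harness.process_block(slot, block).await.unwrap();

// Chain extension is awaited too.
harness
    .extend_chain(
        num_blocks,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    )
    .await;
```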
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2b0cfd7c41b..b57a87dfcae 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -11,7 +11,6 @@ use eth2::{ types::*, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; -use execution_layer::test_utils::MockExecutionLayer; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use lighthouse_network::{Enr, EnrExt, PeerId}; @@ -21,7 +20,6 @@ use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; -use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; @@ -52,6 +50,7 @@ const SKIPPED_SLOTS: &[u64] = &[ ]; struct ApiTester { + harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, next_block: SignedBeaconBlock, @@ -62,14 +61,9 @@ struct ApiTester { proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, _server_shutdown: oneshot::Sender<()>, - validator_keypairs: Vec, network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, - // This is never directly accessed, but adding it creates a payload cache, which we use in tests here. - #[allow(dead_code)] - mock_el: Option>, - _runtime: TestRuntime, } impl ApiTester { @@ -81,12 +75,14 @@ impl ApiTester { } pub async fn new_from_spec(spec: ChainSpec) -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(), + ); harness.advance_slot(); @@ -94,17 +90,19 @@ impl ApiTester { let slot = harness.chain.slot().unwrap().as_u64(); if !SKIPPED_SLOTS.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -112,12 +110,14 @@ impl ApiTester { "precondition: current slot is one after head" ); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -168,15 +168,19 @@ impl ApiTester { let chain = harness.chain.clone(); assert_eq!( - chain.head_info().unwrap().finalized_checkpoint.epoch, + chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, 2, "precondition: finality" ); assert_eq!( chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch, 3, "precondition: justification" @@ -206,6 +210,7 @@ impl 
ApiTester { ); Self { + harness, chain, client, next_block, @@ -216,32 +221,33 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - mock_el: harness.mock_execution_layer, - _runtime: harness.runtime, } } pub async fn new_from_genesis() -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(), + ); harness.advance_slot(); - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -286,6 +292,7 @@ impl ApiTester { ); Self { + harness, chain, client, next_block, @@ -296,15 +303,16 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - mock_el: None, - _runtime: harness.runtime, } } + fn validator_keypairs(&self) -> &[Keypair] { + &self.harness.validator_keypairs + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -329,7 +337,9 @@ impl ApiTester { StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), StateId::Root(Hash256::zero()), ]; - ids.push(StateId::Root(self.chain.head_info().unwrap().state_root)); + ids.push(StateId::Root( + self.chain.canonical_head.cached_head().head_state_root(), + )); ids } @@ -347,13 +357,20 @@ impl ApiTester { BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), BlockId::Root(Hash256::zero()), ]; - ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root)); + ids.push(BlockId::Root( + self.chain.canonical_head.cached_head().head_block_root(), + )); ids } fn get_state(&self, state_id: StateId) -> Option> { match state_id { - StateId::Head => Some(self.chain.head().unwrap().beacon_state), + StateId::Head => Some( + self.chain + .head_snapshot() + .beacon_state + .clone_with_only_committee_caches(), + ), StateId::Genesis => self .chain .get_state(&self.chain.genesis_state_root, None) @@ -361,9 +378,9 @@ impl ApiTester { StateId::Finalized => { let finalized_slot = self .chain - .head_info() - .unwrap() - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -378,9 +395,9 @@ impl ApiTester { StateId::Justified => { let justified_slot = self .chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -404,7 +421,7 @@ impl ApiTester { pub async fn test_beacon_genesis(self) -> Self { let result = self.client.get_beacon_genesis().await.unwrap().data; - let state = 
self.chain.head().unwrap().beacon_state; + let state = &self.chain.head_snapshot().beacon_state; let expected = GenesisData { genesis_time: state.genesis_time(), genesis_validators_root: state.genesis_validators_root(), @@ -426,14 +443,14 @@ impl ApiTester { .map(|res| res.data.root); let expected = match state_id { - StateId::Head => Some(self.chain.head_info().unwrap().state_root), + StateId::Head => Some(self.chain.canonical_head.cached_head().head_state_root()), StateId::Genesis => Some(self.chain.genesis_state_root), StateId::Finalized => { let finalized_slot = self .chain - .head_info() - .unwrap() - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -442,9 +459,9 @@ impl ApiTester { StateId::Justified => { let justified_slot = self .chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch .start_slot(E::slots_per_epoch()); @@ -754,14 +771,20 @@ impl ApiTester { fn get_block_root(&self, block_id: BlockId) -> Option { match block_id { - BlockId::Head => Some(self.chain.head_info().unwrap().block_root), + BlockId::Head => Some(self.chain.canonical_head.cached_head().head_block_root()), BlockId::Genesis => Some(self.chain.genesis_block_root), - BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root), + BlockId::Finalized => Some( + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .root, + ), BlockId::Justified => Some( self.chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .root, ), BlockId::Slot(slot) => self @@ -1322,7 +1345,7 @@ impl ApiTester { pub async fn test_get_node_syncing(self) -> Self { let result = self.client.get_node_syncing().await.unwrap().data; - let head_slot = self.chain.head_info().unwrap().slot; + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let sync_distance = self.chain.slot().unwrap() - head_slot; let expected = SyncingData { @@ -1536,7 +1559,7 @@ impl ApiTester { } fn validator_count(&self) -> usize { - self.chain.head().unwrap().beacon_state.validators().len() + self.chain.head_snapshot().beacon_state.validators().len() } fn interesting_validator_indices(&self) -> Vec> { @@ -1621,7 +1644,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); assert_eq!(results.dependent_root, dependent_root); @@ -1696,7 +1719,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); // Presently, the beacon chain harness never runs the code that primes the proposer // cache. 
If this changes in the future then we'll need some smarter logic here, but @@ -1824,7 +1847,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); self.client .get_validator_duties_proposer(current_epoch) @@ -1878,7 +1901,7 @@ impl ApiTester { } pub async fn test_block_production(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() * 3 { @@ -1898,7 +1921,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -1926,7 +1949,7 @@ impl ApiTester { self.client.post_beacon_blocks(&signed_block).await.unwrap(); - assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block); + assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -1957,7 +1980,7 @@ impl ApiTester { } pub async fn test_block_production_verify_randao_invalid(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() { @@ -1977,7 +2000,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -2040,7 +2063,7 @@ impl ApiTester { } pub async fn test_get_validator_attestation_data(self) -> Self { - let mut state = self.chain.head_beacon_state().unwrap(); + let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); state .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) @@ -2070,7 +2093,6 @@ impl ApiTester { let attestation = self .chain .head_beacon_block() - .unwrap() .message() .body() .attestations()[0] @@ -2098,7 +2120,7 @@ impl ApiTester { let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); - let mut head = self.chain.head().unwrap(); + let mut head = self.chain.head_snapshot().as_ref().clone(); while head.beacon_state.current_epoch() < epoch { per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap(); } @@ -2114,7 +2136,7 @@ impl ApiTester { .client .post_validator_duties_attester( epoch, - (0..self.validator_keypairs.len() as u64) + (0..self.validator_keypairs().len() as u64) .collect::>() .as_slice(), ) @@ -2123,7 +2145,7 @@ impl ApiTester { .data; let (i, kp, duty, proof) = self - .validator_keypairs + .validator_keypairs() .iter() .enumerate() .find_map(|(i, kp)| { @@ -2238,9 +2260,9 @@ impl ApiTester { let mut registrations = vec![]; let mut fee_recipients = vec![]; - let fork = self.chain.head().unwrap().beacon_state.fork(); + let fork = self.chain.head_snapshot().beacon_state.fork(); - for (val_index, keypair) in self.validator_keypairs.iter().enumerate() { + for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); let fee_recipient = Address::from_low_u64_be(val_index as u64); @@ -2273,8 +2295,7 @@ impl ApiTester { for (val_index, (_, fee_recipient)) in self .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .validators() 
.into_iter() @@ -2416,7 +2437,7 @@ impl ApiTester { pub async fn test_post_lighthouse_liveness(self) -> Self { let epoch = self.chain.epoch().unwrap(); - let head_state = self.chain.head_beacon_state().unwrap(); + let head_state = self.chain.head_beacon_state_cloned(); let indices = (0..head_state.validators().len()) .map(|i| i as u64) .collect::>(); @@ -2533,7 +2554,7 @@ impl ApiTester { let block_root = self.next_block.canonical_root(); // current_duty_dependent_root = block root because this is the first slot of the epoch - let current_duty_dependent_root = self.chain.head_beacon_block_root().unwrap(); + let current_duty_dependent_root = self.chain.head_beacon_block_root(); let current_slot = self.chain.slot().unwrap(); let next_slot = self.next_block.slot(); let finalization_distance = E::slots_per_epoch() * 2; @@ -2556,17 +2577,21 @@ impl ApiTester { epoch_transition: true, }); + let finalized_block_root = self + .chain + .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let finalized_block = self + .chain + .get_blinded_block(&finalized_block_root) + .unwrap() + .unwrap(); + let finalized_state_root = finalized_block.state_root(); + let expected_finalized = EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - block: self - .chain - .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(), - state: self - .chain - .state_root_at_slot(next_slot - finalization_distance) - .unwrap() - .unwrap(), + block: finalized_block_root, + state: finalized_state_root, epoch: Epoch::new(3), }); @@ -2578,7 +2603,7 @@ impl ApiTester { let block_events = poll_events(&mut events_future, 3, Duration::from_millis(10000)).await; assert_eq!( block_events.as_slice(), - &[expected_block, expected_finalized, expected_head] + &[expected_block, expected_head, expected_finalized] ); // Test a reorg event diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index bf1918662a7..9c9e094db62 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1358,9 +1358,9 @@ pub enum Response { /// A Status message. Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), + BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. 
- BlocksByRoot(Option>>), + BlocksByRoot(Option>>), } impl std::convert::From> for RPCCodedResponse { diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f6c3e61b0b1..a46a05a8ce3 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -532,10 +532,10 @@ fn handle_v1_response( Protocol::Goodbye => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new( + Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { @@ -572,31 +572,31 @@ fn handle_v2_response( })?; match protocol { Protocol::BlocksByRange => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), )))), }, Protocol::BlocksByRoot => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), @@ -898,10 +898,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -911,7 +911,7 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -924,11 +924,11 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), 
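`handle_v1_response` and `handle_v2_response` above pick the block variant from the negotiated fork context and wrap the result in `Arc` once, at the codec boundary, so every later stage shares it. Distilled to its shape, with stand-in types:

```rust
use std::sync::Arc;

// Illustrative stand-ins for the fork dispatch in `handle_v2_response` above.
#[derive(Clone, Copy, Debug)]
enum ForkName {
    Base,
    Altair,
    Merge,
}

#[derive(Debug)]
enum SignedBeaconBlock {
    Base(Vec<u8>),
    Altair(Vec<u8>),
    Merge(Vec<u8>),
}

fn decode_block(fork: ForkName, bytes: &[u8]) -> Arc<SignedBeaconBlock> {
    // The fork context (not the bytes themselves) decides which variant to
    // decode; the block is wrapped in `Arc` exactly once, here at the edge.
    Arc::new(match fork {
        ForkName::Base => SignedBeaconBlock::Base(bytes.to_vec()),
        ForkName::Altair => SignedBeaconBlock::Altair(bytes.to_vec()),
        ForkName::Merge => SignedBeaconBlock::Merge(bytes.to_vec()),
    })
}

fn main() {
    for fork in [ForkName::Base, ForkName::Altair, ForkName::Merge] {
        let block = decode_block(fork, &[1, 2, 3]);
        println!("{:?} (owners: {})", block, Arc::strong_count(&block));
    }
}
```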
ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); @@ -937,7 +937,7 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -1013,10 +1013,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1028,10 +1028,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -1040,10 +1040,10 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); @@ -1053,12 +1053,12 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() )))) ); @@ -1085,11 +1085,11 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))), ); @@ -1100,11 +1100,11 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); @@ -1112,22 +1112,22 @@ mod tests { encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) ); assert_eq!( encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new( + 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(merge_block_small)))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) ); let mut encoded = @@ -1179,7 +1179,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1200,7 +1200,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1222,7 +1222,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1247,7 +1247,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap(); @@ -1292,7 +1292,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1316,7 +1316,7 @@ mod tests { let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 46de772d8d2..26d755a6e06 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -9,6 +9,7 @@ use ssz_types::{ VariableList, }; use std::ops::Deref; +use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -237,10 +238,10 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the /// batch. - BlocksByRange(Box>), + BlocksByRange(Arc>), /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Box>), + BlocksByRoot(Arc>), /// A PONG response to a PING request. 
Pong(Ping), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index af2656a2759..a01072f8e4e 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,6 +7,7 @@ use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; +use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, @@ -17,7 +18,7 @@ use types::{ #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - BeaconBlock(Box>), + BeaconBlock(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -173,7 +174,7 @@ impl PubsubMessage { )) } }; - Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) + Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 973485fc4a1..90052859bc9 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -174,15 +174,15 @@ fn test_blocks_by_range_chunked_rpc() { // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -311,7 +311,7 @@ fn test_blocks_by_range_over_limit() { // BlocksByRange Response let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_large = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let request_id = messages_to_send as usize; // build the sender future @@ -409,7 +409,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = 
Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); // keep count of the number of messages received let mut messages_received: u64 = 0; @@ -540,7 +540,7 @@ fn test_blocks_by_range_single_empty_rpc() { let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); let messages_to_send = 1; @@ -660,15 +660,15 @@ fn test_blocks_by_root_chunked_rpc() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -803,7 +803,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 6d7375cca72..6f75e1fb23a 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -52,6 +52,7 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; +use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -386,7 +387,7 @@ impl WorkEvent { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, ) -> Self { Self { @@ -490,7 +491,7 @@ impl WorkEvent { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Self { @@ -507,7 +508,7 @@ impl WorkEvent { /// Create a new work event to import `blocks` as a beacon chain segment. 
pub fn chain_segment( process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, ) -> Self { Self { drop_during_sync: false, @@ -654,7 +655,7 @@ pub enum Work { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, }, DelayedImportBlock { @@ -691,13 +692,13 @@ pub enum Work { seen_timestamp: Duration, }, RpcBlock { - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, }, ChainSegment { process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, }, Status { peer_id: PeerId, @@ -1307,15 +1308,6 @@ impl BeaconProcessor { let idle_tx = toolbox.idle_tx; let work_reprocessing_tx = toolbox.work_reprocessing_tx; - // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. - // - // This helps ensure that the worker is always freed in the case of an early exit or panic. - // As such, this instantiation should happen as early in the function as possible. - let send_idle_on_drop = SendOnDrop { - tx: idle_tx, - log: self.log.clone(), - }; - let work_id = work.str_id(); let worker_timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); @@ -1325,6 +1317,16 @@ impl BeaconProcessor { &[work.str_id()], ); + // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. + // + // This helps ensure that the worker is always freed in the case of an early exit or panic. + // As such, this instantiation should happen as early in the function as possible. + let send_idle_on_drop = SendOnDrop { + tx: idle_tx, + _worker_timer: worker_timer, + log: self.log.clone(), + }; + let worker_id = self.current_workers; self.current_workers = self.current_workers.saturating_add(1); @@ -1338,7 +1340,6 @@ impl BeaconProcessor { return; }; - let log = self.log.clone(); let executor = self.executor.clone(); let worker = Worker { @@ -1357,252 +1358,308 @@ impl BeaconProcessor { "worker" => worker_id, ); - let sub_executor = executor.clone(); - executor.spawn_blocking( - move || { - let _worker_timer = worker_timer; + let task_spawner = TaskSpawner { + executor: executor.clone(), + send_idle_on_drop, + }; - match work { - /* - * Individual unaggregated attestation verification. - */ - Work::GossipAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched unaggregated attestation verification. - */ - Work::GossipAttestationBatch { packages } => worker - .process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)), - /* - * Individual aggregated attestation verification. - */ - Work::GossipAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched aggregated attestation verification. - */ - Work::GossipAggregateBatch { packages } => { - worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) - } - /* - * Verification for beacon blocks received on gossip. - */ - Work::GossipBlock { - message_id, - peer_id, - peer_client, - block, - seen_timestamp, - } => worker.process_gossip_block( + let sub_executor = executor; + match work { + /* + * Individual unaggregated attestation verification. 
+ */ + Work::GossipAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched unaggregated attestation verification. + */ + Work::GossipAttestationBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Individual aggregated attestation verification. + */ + Work::GossipAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched aggregated attestation verification. + */ + Work::GossipAggregateBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Verification for beacon blocks received on gossip. + */ + Work::GossipBlock { + message_id, + peer_id, + peer_client, + block, + seen_timestamp, + } => task_spawner.spawn_async(async move { + worker + .process_gossip_block( message_id, peer_id, peer_client, - *block, - work_reprocessing_tx.clone(), - duplicate_cache, - seen_timestamp, - ), - /* - * Import for blocks that we received earlier than their intended slot. - */ - Work::DelayedImportBlock { - peer_id, block, - seen_timestamp, - } => worker.process_gossip_verified_block( - peer_id, - *block, work_reprocessing_tx, + duplicate_cache, seen_timestamp, - ), - /* - * Voluntary exits received on gossip. - */ - Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, - } => worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit), - /* - * Proposer slashings received on gossip. - */ - Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - } => worker.process_gossip_proposer_slashing( - message_id, - peer_id, - *proposer_slashing, - ), - /* - * Attester slashings received on gossip. - */ - Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - } => worker.process_gossip_attester_slashing( - message_id, - peer_id, - *attester_slashing, - ), - /* - * Sync committee message verification. - */ - Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature, - subnet_id, - seen_timestamp, - } => worker.process_gossip_sync_committee_signature( - message_id, - peer_id, - *sync_signature, - subnet_id, - seen_timestamp, - ), - /* - * Syn contribution verification. - */ - Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution, - seen_timestamp, - } => worker.process_sync_committee_contribution( - message_id, - peer_id, - *sync_contribution, - seen_timestamp, - ), - /* - * Verification for beacon blocks received during syncing via RPC. - */ - Work::RpcBlock { - block, - seen_timestamp, - process_type, - } => { - worker.process_rpc_block( - *block, - seen_timestamp, - process_type, - work_reprocessing_tx.clone(), - duplicate_cache, - ); - } - /* - * Verification for a chain segment (multiple blocks). - */ - Work::ChainSegment { process_id, blocks } => { - worker.process_chain_segment(process_id, blocks) - } - /* - * Processing of Status Messages. 
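The rewritten arms route each job by its nature: CPU-heavy verification goes through `task_spawner.spawn_blocking`, while the newly-async paths (`process_gossip_block`, `process_rpc_block`, `process_chain_segment`) go through `spawn_async`. A stand-alone miniature of that split, with hypothetical work variants:

```rust
use std::time::Duration;

// Stand-in work items; the real `Work` enum above has many more variants.
enum Work {
    VerifySignature(Vec<u8>), // CPU-bound
    ImportBlock(Vec<u8>),     // awaits I/O
}

async fn dispatch(work: Work) {
    match work {
        Work::VerifySignature(bytes) => {
            // Long CPU work goes to the blocking pool so it cannot starve
            // the async worker threads.
            let _digest = tokio::task::spawn_blocking(move || {
                bytes.iter().fold(0u8, |acc, b| acc ^ b)
            })
            .await
            .unwrap();
        }
        Work::ImportBlock(bytes) => {
            // Async work stays on the core executor; it yields at each await.
            tokio::time::sleep(Duration::from_millis(1)).await; // stand-in for DB I/O
            drop(bytes);
        }
    }
}

#[tokio::main]
async fn main() {
    dispatch(Work::VerifySignature(vec![1, 2, 3])).await;
    dispatch(Work::ImportBlock(vec![4, 5, 6])).await;
}
```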
- */ - Work::Status { peer_id, message } => worker.process_status(peer_id, message), - /* - * Processing of range syncing requests from other peers. - */ - Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - /* - * Processing of blocks by roots requests from other peers. - */ - Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_root_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - None, // Do not allow this attestation to be re-processed beyond this point. - seen_timestamp, - ), - Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - None, - seen_timestamp, - ), - }; + ) + .await + }), + /* + * Import for blocks that we received earlier than their intended slot. + */ + Work::DelayedImportBlock { + peer_id, + block, + seen_timestamp, + } => task_spawner.spawn_async(worker.process_gossip_verified_block( + peer_id, + *block, + work_reprocessing_tx, + seen_timestamp, + )), + /* + * Voluntary exits received on gossip. + */ + Work::GossipVoluntaryExit { + message_id, + peer_id, + voluntary_exit, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit) + }), + /* + * Proposer slashings received on gossip. + */ + Work::GossipProposerSlashing { + message_id, + peer_id, + proposer_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) + }), + /* + * Attester slashings received on gossip. + */ + Work::GossipAttesterSlashing { + message_id, + peer_id, + attester_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + }), + /* + * Sync committee message verification. + */ + Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_sync_committee_signature( + message_id, + peer_id, + *sync_signature, + subnet_id, + seen_timestamp, + ) + }), + /* + * Syn contribution verification. + */ + Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_sync_committee_contribution( + message_id, + peer_id, + *sync_contribution, + seen_timestamp, + ) + }), + /* + * Verification for beacon blocks received during syncing via RPC. + */ + Work::RpcBlock { + block, + seen_timestamp, + process_type, + } => task_spawner.spawn_async(worker.process_rpc_block( + block, + seen_timestamp, + process_type, + work_reprocessing_tx, + duplicate_cache, + )), + /* + * Verification for a chain segment (multiple blocks). + */ + Work::ChainSegment { process_id, blocks } => task_spawner + .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + /* + * Processing of Status Messages. 
+ */ + Work::Status { peer_id, message } => { + task_spawner.spawn_blocking(move || worker.process_status(peer_id, message)) + } + /* + * Processing of range syncing requests from other peers. + */ + Work::BlocksByRangeRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + /* + * Processing of blocks by roots requests from other peers. + */ + Work::BlocksByRootsRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_root_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + Work::UnknownBlockAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + None, // Do not allow this attestation to be re-processed beyond this point. + seen_timestamp, + ) + }), + Work::UnknownBlockAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + None, + seen_timestamp, + ) + }), + }; + } +} - trace!( - log, - "Beacon processor worker done"; - "work" => work_id, - "worker" => worker_id, - ); +/// Spawns tasks that are either: +/// +/// - Blocking (i.e. intensive methods that shouldn't run on the core `tokio` executor) +/// - Async (i.e. `async` methods) +/// +/// Takes a `SendOnDrop` and ensures it is dropped after the task completes. This frees the beacon +/// processor worker so a new task can be started. +struct TaskSpawner { + executor: TaskExecutor, + send_idle_on_drop: SendOnDrop, +} - // This explicit `drop` is used to remind the programmer that this variable must - // not be dropped until the worker is complete. Dropping it early will cause the - // worker to be marked as "free" and cause an over-spawning of workers. - drop(send_idle_on_drop); +impl TaskSpawner { + /// Spawn an async task, dropping the `SendOnDrop` after the task has completed. + fn spawn_async(self, task: impl Future + Send + 'static) { + self.executor.spawn( + async { + task.await; + drop(self.send_idle_on_drop) }, WORKER_TASK_NAME, - ); + ) + } + + /// Spawn a blocking task, dropping the `SendOnDrop` after the task has completed. + fn spawn_blocking(self, task: F) + where + F: FnOnce() + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(); + drop(self.send_idle_on_drop) + }, + WORKER_TASK_NAME, + ) + } + + /// Spawn a blocking task, passing the `SendOnDrop` into the task. + /// + /// ## Notes + /// + /// Users must ensure the `SendOnDrop` is dropped at the appropriate time! + pub fn spawn_blocking_with_manual_send_idle(self, task: F) + where + F: FnOnce(SendOnDrop) + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(self.send_idle_on_drop); + }, + WORKER_TASK_NAME, + ) } } @@ -1618,6 +1675,8 @@ impl BeaconProcessor { /// https://doc.rust-lang.org/std/ops/trait.Drop.html#panics pub struct SendOnDrop { tx: mpsc::Sender<()>, + // The field is unused, but it's here to ensure the timer is dropped once the task has finished. 
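`TaskSpawner` and `SendOnDrop` above tie the worker-freed signal to a destructor so that it fires exactly once on every exit path (normal return, early exit or panic), and the same trick, via the `_worker_timer` field, stops the worker-duration timer when the task ends. A condensed, self-contained sketch of the pattern, not the real implementation:

```rust
use std::future::Future;
use tokio::sync::mpsc;

// Stand-in for the `SendOnDrop` guard above.
struct SendOnDrop {
    tx: mpsc::Sender<()>,
}

impl Drop for SendOnDrop {
    fn drop(&mut self) {
        // `Drop` is synchronous, so use the non-async `try_send`; a closed
        // channel must not panic inside a destructor.
        let _ = self.tx.try_send(());
    }
}

struct TaskSpawner {
    handle: tokio::runtime::Handle,
    send_idle_on_drop: SendOnDrop,
}

impl TaskSpawner {
    fn spawn_async(self, task: impl Future<Output = ()> + Send + 'static) {
        let Self { handle, send_idle_on_drop } = self;
        let _ = handle.spawn(async move {
            task.await;
            drop(send_idle_on_drop); // worker freed only after the future completes
        });
    }

    fn spawn_blocking<F: FnOnce() + Send + 'static>(self, task: F) {
        let Self { handle, send_idle_on_drop } = self;
        let _ = handle.spawn_blocking(move || {
            task();
            drop(send_idle_on_drop); // ...or after the blocking closure returns
        });
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(1);
    let spawner = TaskSpawner {
        handle: tokio::runtime::Handle::current(),
        send_idle_on_drop: SendOnDrop { tx },
    };
    spawner.spawn_blocking(|| { /* pretend to verify something */ });
    assert!(rx.recv().await.is_some()); // the idle signal arrived
}
```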
+    _worker_timer: Option<metrics::HistogramTimer>,
     log: Logger,
 }
 
diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs
index 1c9d323576d..a39ca2ec33e 100644
--- a/beacon_node/network/src/beacon_processor/tests.rs
+++ b/beacon_node/network/src/beacon_processor/tests.rs
@@ -8,7 +8,6 @@ use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
 use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
-use environment::{null_logger, Environment, EnvironmentBuilder};
 use lighthouse_network::{
     discv5::enr::{CombinedKey, EnrBuilder},
     rpc::methods::{MetaData, MetaDataV2},
@@ -20,7 +19,6 @@ use std::cmp;
 use std::iter::Iterator;
 use std::sync::Arc;
 use std::time::Duration;
-use tokio::runtime::Handle;
 use tokio::sync::mpsc;
 use types::{
     Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock,
@@ -45,7 +43,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10);
 /// Provides utilities for testing the `BeaconProcessor`.
 struct TestRig {
     chain: Arc<BeaconChain<T>>,
-    next_block: SignedBeaconBlock<E>,
+    next_block: Arc<SignedBeaconBlock<E>>,
     attestations: Vec<(Attestation<E>, SubnetId)>,
     next_block_attestations: Vec<(Attestation<E>, SubnetId)>,
     next_block_aggregate_attestations: Vec<SignedAggregateAndProof<E>>,
@@ -56,7 +54,7 @@ struct TestRig {
     work_journal_rx: mpsc::Receiver<&'static str>,
     _network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
     _sync_rx: mpsc::UnboundedReceiver<SyncMessage<E>>,
-    environment: Option<Environment<E>>,
+    _harness: BeaconChainHarness<T>,
 }
 
 /// This custom drop implementation ensures that we shut down the tokio runtime gracefully. Without
@@ -65,12 +63,11 @@ impl Drop for TestRig {
     fn drop(&mut self) {
         // Causes the beacon processor to shutdown.
         self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0;
-        self.environment.take().unwrap().shutdown_on_idle();
     }
 }
 
 impl TestRig {
-    pub fn new(chain_length: u64) -> Self {
+    pub async fn new(chain_length: u64) -> Self {
         // This allows for testing voluntary exits without building out a massive chain.
let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -84,16 +81,18 @@ impl TestRig { harness.advance_slot(); for _ in 0..chain_length { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -101,8 +100,9 @@ impl TestRig { "precondition: current slot is one after head" ); - let (next_block, next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -155,11 +155,11 @@ impl TestRig { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - let chain = harness.chain; + let chain = harness.chain.clone(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let log = null_logger().unwrap(); + let log = harness.logger().clone(); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); @@ -181,15 +181,7 @@ impl TestRig { &log, )); - let mut environment = EnvironmentBuilder::mainnet() - .null_logger() - .unwrap() - .multi_threaded_tokio_runtime() - .unwrap() - .build() - .unwrap(); - - let executor = environment.core_context().executor; + let executor = harness.runtime.task_executor.clone(); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); @@ -208,7 +200,7 @@ impl TestRig { Self { chain, - next_block, + next_block: Arc::new(next_block), attestations, next_block_attestations, next_block_aggregate_attestations, @@ -219,12 +211,16 @@ impl TestRig { work_journal_rx, _network_rx, _sync_rx, - environment: Some(environment), + _harness: harness, } } + pub async fn recompute_head(&self) { + self.chain.recompute_head_at_current_slot().await.unwrap() + } + pub fn head_root(&self) -> Hash256 { - self.chain.head().unwrap().beacon_block_root + self.chain.head_snapshot().beacon_block_root } pub fn enqueue_gossip_block(&self) { @@ -233,7 +229,7 @@ impl TestRig { junk_message_id(), junk_peer_id(), Client::default(), - Box::new(self.next_block.clone()), + self.next_block.clone(), Duration::from_secs(0), )) .unwrap(); @@ -241,7 +237,7 @@ impl TestRig { pub fn enqueue_rpc_block(&self) { let event = WorkEvent::rpc_beacon_block( - Box::new(self.next_block.clone()), + self.next_block.clone(), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -324,28 +320,16 @@ impl TestRig { .unwrap(); } - fn handle(&mut self) -> Handle { - self.environment - .as_mut() - .unwrap() - .core_context() - .executor - .handle() - .unwrap() - } - /// Assert that the `BeaconProcessor` doesn't produce any events in the given `duration`. - pub fn assert_no_events_for(&mut self, duration: Duration) { - self.handle().block_on(async { - tokio::select! { - _ = tokio::time::sleep(duration) => (), - event = self.work_journal_rx.recv() => panic!( - "received {:?} within {:?} when expecting no events", - event, - duration - ), - } - }) + pub async fn assert_no_events_for(&mut self, duration: Duration) { + tokio::select! 
{ + _ = tokio::time::sleep(duration) => (), + event = self.work_journal_rx.recv() => panic!( + "received {:?} within {:?} when expecting no events", + event, + duration + ), + } } /// Checks that the `BeaconProcessor` event journal contains the `expected` events in the given @@ -354,57 +338,54 @@ impl TestRig { /// /// Given the described logic, `expected` must not contain `WORKER_FREED` or `NOTHING_TO_DO` /// events. - pub fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { + pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { assert!(expected .iter() .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); - let (events, worker_freed_remaining) = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); - let mut worker_freed_remaining = expected.len(); - - let drain_future = async { - loop { - match self.work_journal_rx.recv().await { - Some(event) if event == WORKER_FREED => { - worker_freed_remaining -= 1; - if worker_freed_remaining == 0 { - // Break when all expected events are finished. - break; - } - } - Some(event) if event == NOTHING_TO_DO => { - // Ignore these. - } - Some(event) => { - events.push(event); + let mut events = Vec::with_capacity(expected.len()); + let mut worker_freed_remaining = expected.len(); + + let drain_future = async { + loop { + match self.work_journal_rx.recv().await { + Some(event) if event == WORKER_FREED => { + worker_freed_remaining -= 1; + if worker_freed_remaining == 0 { + // Break when all expected events are finished. + break; } - None => break, } + Some(event) if event == NOTHING_TO_DO => { + // Ignore these. + } + Some(event) => { + events.push(event); + } + None => break, } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", - STANDARD_TIMEOUT, - expected, - events, - worker_freed_remaining, - ), - _ = drain_future => {}, } - - (events, worker_freed_remaining) - }); + }; + + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", + STANDARD_TIMEOUT, + expected, + events, + worker_freed_remaining, + ), + _ = drain_future => {}, + } assert_eq!(events, expected); assert_eq!(worker_freed_remaining, 0); } - pub fn assert_event_journal(&mut self, expected: &[&str]) { - self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT); + pub async fn assert_event_journal(&mut self, expected: &[&str]) { + self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT) + .await } /// Assert that the `BeaconProcessor` event journal is as `expected`. @@ -413,34 +394,34 @@ impl TestRig { /// /// We won't attempt to listen for any more than `expected.len()` events. As such, it makes sense /// to use the `NOTHING_TO_DO` event to ensure that execution has completed. - pub fn assert_event_journal_with_timeout(&mut self, expected: &[&str], timeout: Duration) { - let events = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); - - let drain_future = async { - while let Some(event) = self.work_journal_rx.recv().await { - events.push(event); - - // Break as soon as we collect the desired number of events. 
- if events.len() >= expected.len() { - break; - } + pub async fn assert_event_journal_with_timeout( + &mut self, + expected: &[&str], + timeout: Duration, + ) { + let mut events = Vec::with_capacity(expected.len()); + + let drain_future = async { + while let Some(event) = self.work_journal_rx.recv().await { + events.push(event); + + // Break as soon as we collect the desired number of events. + if events.len() >= expected.len() { + break; } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(timeout) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", - timeout, - expected, - events - ), - _ = drain_future => {}, } - - events - }); + }; + + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(timeout) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", + timeout, + expected, + events + ), + _ = drain_future => {}, + } assert_eq!(events, expected); } @@ -455,9 +436,9 @@ fn junk_message_id() -> MessageId { } /// Blocks that arrive early should be queued for later processing. -#[test] -fn import_gossip_block_acceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_acceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -477,7 +458,8 @@ fn import_gossip_block_acceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for @@ -492,7 +474,8 @@ fn import_gossip_block_acceptably_early() { "block not yet imported" ); - rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -502,9 +485,9 @@ fn import_gossip_block_acceptably_early() { } /// Blocks that are *too* early shouldn't get into the delay queue. -#[test] -fn import_gossip_block_unacceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_unacceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -524,11 +507,12 @@ fn import_gossip_block_unacceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Waiting for 5 seconds is a bit arbitrary, however it *should* be long enough to ensure the // block isn't imported. - rig.assert_no_events_for(Duration::from_secs(5)); + rig.assert_no_events_for(Duration::from_secs(5)).await; assert!( rig.head_root() != rig.next_block.canonical_root(), @@ -537,9 +521,9 @@ fn import_gossip_block_unacceptably_early() { } /// Blocks that arrive on-time should be processed normally. 
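The drain-or-time-out shape used by `assert_event_journal_with_timeout` above reduces to racing a sleep against a collector future with `tokio::select!`. A minimal stand-alone version:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

// Collect `n` events or panic with whatever arrived before the deadline.
async fn collect_n(
    rx: &mut mpsc::Receiver<&'static str>,
    n: usize,
    timeout: Duration,
) -> Vec<&'static str> {
    let mut events = Vec::with_capacity(n);
    let drain = async {
        while let Some(event) = rx.recv().await {
            events.push(event);
            if events.len() >= n {
                break;
            }
        }
    };
    // When the sleep wins, `drain` is dropped and its borrow of `events`
    // ends, so the buffer can still be reported afterwards.
    let timed_out = tokio::select! {
        _ = tokio::time::sleep(timeout) => true,
        _ = drain => false,
    };
    assert!(!timed_out, "timed out after {:?}, got {:?}", timeout, events);
    events
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(8);
    tx.send("GOSSIP_BLOCK").await.unwrap();
    tx.send("WORKER_FREED").await.unwrap();
    let events = collect_n(&mut rx, 2, Duration::from_secs(1)).await;
    assert_eq!(events, ["GOSSIP_BLOCK", "WORKER_FREED"]);
}
```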
-#[test] -fn import_gossip_block_at_current_slot() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_at_current_slot() { + let mut rig = TestRig::new(SMALL_CHAIN).await; assert_eq!( rig.chain.slot().unwrap(), @@ -549,7 +533,8 @@ fn import_gossip_block_at_current_slot() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -559,15 +544,16 @@ fn import_gossip_block_at_current_slot() { } /// Ensure a valid attestation can be imported. -#[test] -fn import_gossip_attestation() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_attestation() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -583,8 +569,8 @@ enum BlockImportMethod { /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. -fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -592,7 +578,8 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -613,11 +600,12 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. - rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -632,20 +620,20 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { ); } -#[test] -fn attestation_to_unknown_block_processed_after_gossip_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Gossip) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_gossip_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Gossip).await } -#[test] -fn attestation_to_unknown_block_processed_after_rpc_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Rpc) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_rpc_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. 
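The conversions in this file all follow one recipe: `#[test]` plus a hand-managed runtime becomes `#[tokio::test]`, and each rig call gains `.await`. A before-and-after sketch (the commented calls stand in for the real rig above):

```rust
// Before: the test owns a runtime and must `block_on` every async step.
#[test]
fn old_style() {
    let runtime = tokio::runtime::Runtime::new().unwrap();
    runtime.block_on(async {
        // let mut rig = TestRig::new(SMALL_CHAIN).await;
        // rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED]).await;
    });
}

// After: the attribute builds the runtime, so setup and assertions compose
// directly with `.await` and read top to bottom.
#[tokio::test]
async fn new_style() {
    // let mut rig = TestRig::new(SMALL_CHAIN).await;
    // rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED]).await;
}
```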
-fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Empty the op pool. rig.chain @@ -659,7 +647,8 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -680,11 +669,12 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. - rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -699,21 +689,21 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { ); } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip).await } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. -#[test] -fn requeue_unknown_block_gossip_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -721,7 +711,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -734,7 +725,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_ATTESTATION, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -745,9 +737,9 @@ fn requeue_unknown_block_gossip_attestation_without_import() { /// Ensure that aggregate that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. 
-#[test] -fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. @@ -755,7 +747,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -768,7 +761,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_AGGREGATE, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -778,10 +772,10 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { } /// Ensure a bunch of valid operations can be imported. -#[test] -fn import_misc_gossip_ops() { +#[tokio::test] +async fn import_misc_gossip_ops() { // Exits need the long chain so validators aren't too young to exit. - let mut rig = TestRig::new(LONG_CHAIN); + let mut rig = TestRig::new(LONG_CHAIN).await; /* * Attester slashing @@ -791,7 +785,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_attester_slashing(); - rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attester_slashings(), @@ -807,7 +802,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_proposer_slashing(); - rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_proposer_slashings(), @@ -823,7 +819,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_voluntary_exit(); - rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_voluntary_exits(), diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index f014af4c555..56f38c7f220 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -13,6 +13,7 @@ use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerI use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; +use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; @@ -636,24 +637,27 @@ impl Worker { /// /// Raises a log if there are errors. 
#[allow(clippy::too_many_arguments)] - pub fn process_gossip_block( + pub async fn process_gossip_block( self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, seen_duration: Duration, ) { - if let Some(gossip_verified_block) = self.process_gossip_unverified_block( - message_id, - peer_id, - peer_client, - block, - reprocess_tx.clone(), - seen_duration, - ) { + if let Some(gossip_verified_block) = self + .process_gossip_unverified_block( + message_id, + peer_id, + peer_client, + block, + reprocess_tx.clone(), + seen_duration, + ) + .await + { let block_root = gossip_verified_block.block_root; if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( @@ -661,7 +665,8 @@ impl Worker { gossip_verified_block, reprocess_tx, seen_duration, - ); + ) + .await; // Drop the handle to remove the entry from the cache drop(handle); } else { @@ -678,12 +683,12 @@ impl Worker { /// if it passes gossip propagation criteria, tell the network thread to forward it. /// /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. - pub fn process_gossip_unverified_block( + pub async fn process_gossip_unverified_block( &self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, seen_duration: Duration, ) -> Option> { @@ -704,7 +709,7 @@ impl Worker { Some(peer_client.to_string()), ); - let verified_block = match self.chain.verify_block_for_gossip(block) { + let verified_block = match self.chain.clone().verify_block_for_gossip(block).await { Ok(verified_block) => { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); @@ -887,7 +892,7 @@ impl Worker { /// Process the beacon block that has already passed gossip verification. /// /// Raises a log if there are errors. - pub fn process_gossip_verified_block( + pub async fn process_gossip_verified_block( self, peer_id: PeerId, verified_block: GossipVerifiedBlock, @@ -895,9 +900,9 @@ impl Worker { // This value is not used presently, but it might come in handy for debugging. _seen_duration: Duration, ) { - let block = Box::new(verified_block.block.clone()); + let block: Arc<_> = verified_block.block.clone(); - match self.chain.process_block(verified_block) { + match self.chain.process_block(verified_block).await { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -913,24 +918,27 @@ impl Worker { ) }; - trace!( + debug!( self.log, "Gossipsub block processed"; + "block" => ?block_root, "peer_id" => %peer_id ); - match self.chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "block gossip" - ), - Err(e) => error!( + if let Err(e) = self.chain.recompute_head_at_current_slot().await { + error!( self.log, "Fork choice failed"; "error" => ?e, - "location" => "block gossip" - ), + "location" => "block_gossip" + ) + } else { + debug!( + self.log, + "Fork choice success"; + "block" => ?block_root, + "location" => "block_gossip" + ) } } Err(BlockError::ParentUnknown { .. 
}) => { @@ -1144,13 +1152,9 @@ impl Worker { .read() .register_gossip_attester_slashing(slashing.as_inner()); - if let Err(e) = self.chain.import_attester_slashing(slashing) { - debug!(self.log, "Error importing attester slashing"; "error" => ?e); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL); - } else { - debug!(self.log, "Successfully imported attester slashing"); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); - } + self.chain.import_attester_slashing(slashing); + debug!(self.log, "Successfully imported attester slashing"); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } /// Process the sync committee signature received from the gossip network and: diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index cf113ca1faf..87d4da2c6dd 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -9,6 +9,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error}; use slot_clock::SlotClock; +use std::sync::Arc; use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -62,7 +63,7 @@ impl Worker { &self, remote: &StatusMessage, ) -> Result, BeaconChainError> { - let local = self.chain.status_message()?; + let local = self.chain.status_message(); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); let irrelevant_reason = if local.fork_digest != remote.fork_digest { @@ -143,7 +144,7 @@ impl Worker { Ok(Some(block)) => { self.send_response( peer_id, - Response::BlocksByRoot(Some(Box::new(block))), + Response::BlocksByRoot(Some(block)), request_id, ); send_block_count += 1; @@ -266,7 +267,7 @@ impl Worker { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(Some(Box::new(block))), + response: Response::BlocksByRange(Some(Arc::new(block))), id: request_id, }); } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 04ed1ff6085..804cfbe4632 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -11,7 +11,8 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, trace, warn}; +use slog::{debug, error, info, warn}; +use std::sync::Arc; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -45,9 +46,9 @@ pub enum FailureMode { impl Worker { /// Attempt to process a block received from a direct RPC request. 
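`process_gossip_block` above holds the handle returned by `duplicate_cache.check_and_insert(block_root)` across the async import, so concurrent duplicates are rejected and the entry is released when the handle drops, on success and failure alike. A stand-in sketch of such a cache (not Lighthouse's `DuplicateCache`):

```rust
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

#[derive(Clone, Default)]
struct DuplicateCache {
    seen: Arc<Mutex<HashSet<[u8; 32]>>>,
}

struct CacheHandle {
    root: [u8; 32],
    cache: DuplicateCache,
}

impl DuplicateCache {
    /// Returns a handle if `root` was not already in flight; while the
    /// handle lives, other workers see the block as being processed.
    fn check_and_insert(&self, root: [u8; 32]) -> Option<CacheHandle> {
        let mut seen = self.seen.lock().unwrap();
        // `insert` returns false if the root was already present.
        seen.insert(root).then(|| CacheHandle {
            root,
            cache: self.clone(),
        })
    }
}

impl Drop for CacheHandle {
    fn drop(&mut self) {
        // Releasing is tied to `Drop`, so early returns and panics
        // cannot leak an "in flight" entry.
        self.cache.seen.lock().unwrap().remove(&self.root);
    }
}

fn main() {
    let cache = DuplicateCache::default();
    let root = [0u8; 32];
    let handle = cache.check_and_insert(root).expect("first insert wins");
    assert!(cache.check_and_insert(root).is_none()); // duplicate rejected
    drop(handle); // releases the slot...
    assert!(cache.check_and_insert(root).is_some()); // ...so it can be retried
}
```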
-    pub fn process_rpc_block(
+    pub async fn process_rpc_block(
         self,
-        block: SignedBeaconBlock<T::EthSpec>,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
         reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
@@ -66,7 +67,7 @@ impl<T: BeaconChainTypes> Worker<T> {
             }
         };
         let slot = block.slot();
-        let result = self.chain.process_block(block);
+        let result = self.chain.process_block(block).await;
 
         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
 
@@ -87,7 +88,8 @@ impl<T: BeaconChainTypes> Worker<T> {
                     None,
                     None,
                 );
-                self.run_fork_choice()
+
+                self.recompute_head("process_rpc_block").await;
             }
         }
         // Sync handles these results
@@ -102,10 +104,10 @@ impl<T: BeaconChainTypes> Worker<T> {
 
     /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync
     /// thread if more blocks are needed to process it.
-    pub fn process_chain_segment(
+    pub async fn process_chain_segment(
         &self,
         sync_type: ChainSegmentProcessId,
-        downloaded_blocks: Vec<SignedBeaconBlock<T::EthSpec>>,
+        downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
     ) {
         let result = match sync_type {
             // this is a request from the range sync
@@ -114,7 +116,7 @@
                 let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
                 let sent_blocks = downloaded_blocks.len();
 
-                match self.process_blocks(downloaded_blocks.iter()) {
+                match self.process_blocks(downloaded_blocks.iter()).await {
                     (_, Ok(_)) => {
                         debug!(self.log, "Batch processed";
                             "batch_epoch" => epoch,
@@ -183,7 +185,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                 );
                 // parent blocks are ordered from highest slot to lowest, so we need to process in
                 // reverse
-                match self.process_blocks(downloaded_blocks.iter().rev()) {
+                match self.process_blocks(downloaded_blocks.iter().rev()).await {
                     (imported_blocks, Err(e)) => {
                         debug!(self.log, "Parent lookup failed"; "error" => %e.message);
                         BatchProcessResult::Failed {
@@ -204,19 +206,17 @@ impl<T: BeaconChainTypes> Worker<T> {
     }
 
     /// Helper function to process block batches which only consumes the chain and blocks to process.
-    fn process_blocks<'a>(
+    async fn process_blocks<'a>(
         &self,
-        downloaded_blocks: impl Iterator<Item = &'a SignedBeaconBlock<T::EthSpec>>,
+        downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
     ) -> (usize, Result<(), ChainSegmentFailed>) {
-        let blocks = downloaded_blocks.cloned().collect::<Vec<_>>();
-        match self.chain.process_chain_segment(blocks) {
+        let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect();
+        match self.chain.process_chain_segment(blocks).await {
             ChainSegmentResult::Successful { imported_blocks } => {
                 metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL);
                 if imported_blocks > 0 {
-                    // Batch completed successfully with at least one block, run fork choice.
-                    self.run_fork_choice();
+                    self.recompute_head("process_blocks_ok").await;
                 }
-
                 (imported_blocks, Ok(()))
             }
             ChainSegmentResult::Failed {
@@ -226,7 +226,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                 metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL);
                 let r = self.handle_failed_chain_segment(error);
                 if imported_blocks > 0 {
-                    self.run_fork_choice();
+                    self.recompute_head("process_blocks_err").await;
                 }
                 (imported_blocks, r)
             }
@@ -236,9 +236,13 @@
 
     /// Helper function to process backfill block batches which only consumes the chain and blocks to process.
fn process_backfill_blocks( &self, - blocks: Vec>, + blocks: Vec>>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blinded_blocks = blocks.into_iter().map(Into::into).collect(); + let blinded_blocks = blocks + .iter() + .map(|full_block| full_block.clone_as_blinded()) + .map(Arc::new) + .collect(); match self.chain.import_historical_block_batch(blinded_blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -357,18 +361,18 @@ impl Worker { /// Runs fork-choice on a given chain. This is used during block processing after one successful /// block import. - fn run_fork_choice(&self) { - match self.chain.fork_choice() { - Ok(()) => trace!( + async fn recompute_head(&self, location: &str) { + match self.chain.recompute_head_at_current_slot().await { + Ok(()) => debug!( self.log, "Fork choice success"; - "location" => "batch processing" + "location" => location ), Err(e) => error!( self.log, "Fork choice failed"; "error" => ?e, - "location" => "batch import error" + "location" => location ), } } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index cc0165131c2..3605b94acf4 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -143,10 +143,6 @@ lazy_static! { "beacon_processor_attester_slashing_imported_total", "Total number of attester slashings imported to the op pool." ); - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL: Result = try_create_int_counter( - "beacon_processor_attester_slashing_error_total", - "Total number of attester slashings that raised an error during processing." - ); // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_rpc_block_queue_total", diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index b8db9c17f83..9d86c3e55a6 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -2,9 +2,10 @@ use crate::beacon_processor::{ BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, }; use crate::service::{NetworkMessage, RequestId}; +use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::*; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, @@ -114,11 +115,10 @@ impl Processor { /// Called when we first connect to a peer, or when the PeerManager determines we need to /// re-status. pub fn send_status(&mut self, peer_id: PeerId) { - if let Ok(status_message) = status_message(&self.chain) { - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); - self.network - .send_processor_request(peer_id, Request::Status(status_message)); - } + let status_message = status_message(&self.chain); + debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + self.network + .send_processor_request(peer_id, Request::Status(status_message)); } /// Handle a `Status` request. @@ -132,12 +132,12 @@ impl Processor { ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); - // ignore status responses if we are shutting down - if let Ok(status_message) = status_message(&self.chain) { - // Say status back. 
- self.network - .send_response(peer_id, Response::Status(status_message), request_id); - } + // Say status back. + self.network.send_response( + peer_id, + Response::Status(status_message(&self.chain)), + request_id, + ); self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) } @@ -178,7 +178,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -209,7 +209,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -244,7 +244,7 @@ impl Processor { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, ) { self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( message_id, @@ -370,22 +370,6 @@ impl Processor { } } -/// Build a `StatusMessage` representing the state of the given `beacon_chain`. -pub(crate) fn status_message( - beacon_chain: &BeaconChain, -) -> Result { - let head_info = beacon_chain.head_info()?; - let fork_digest = beacon_chain.enr_fork_id().fork_digest; - - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) -} - /// Wraps a Network Channel to employ various RPC related network functionality for the /// processor. #[derive(Clone)] diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8995de2e5f..c21183608aa 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,7 +7,7 @@ use crate::{ subnet_service::{AttestationService, SubnetServiceMessage}, NetworkConfig, }; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; @@ -30,8 +30,8 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, RelativeEpoch, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, Unsigned, ValidatorSubscription, + ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + Unsigned, ValidatorSubscription, }; mod tests; @@ -706,29 +706,12 @@ impl NetworkService { fn update_gossipsub_parameters(&mut self) { if let Ok(slot) = self.beacon_chain.slot() { - if let Some(active_validators) = self + let active_validators_opt = self .beacon_chain - .with_head(|head| { - Ok::<_, BeaconChainError>( - head.beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - self.beacon_chain.epoch().ok().map(|current_epoch| { - head.beacon_state - .validators() - .iter() - .filter(|validator| validator.is_active_at(current_epoch)) - .count() - }) - }), - ) - }) - .unwrap_or(None) - { + .canonical_head + .cached_head() + .active_validator_count(); + if let Some(active_validators) = active_validators_opt { if self .libp2p .swarm @@ -742,6 +725,14 @@ impl NetworkService { "active_validators" => active_validators ); } + } else { + // This scenario will only happen if the caches on 
the cached canonical head aren't + // built. That should never be the case. + error!( + self.log, + "Active validator count unavailable"; + "info" => "please report this bug" + ); } } } diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ade490e00ec..865f8ee933f 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,4 +1,5 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -6,20 +7,33 @@ use lighthouse_network::rpc::StatusMessage; /// NOTE: The purpose of this is simply to obtain a `StatusMessage` from the `BeaconChain` without /// polluting/coupling the type with RPC concepts. pub trait ToStatusMessage { - fn status_message(&self) -> Result; + fn status_message(&self) -> StatusMessage; } impl ToStatusMessage for BeaconChain { - fn status_message(&self) -> Result { - let head_info = self.head_info()?; - let fork_digest = self.enr_fork_id().fork_digest; + fn status_message(&self) -> StatusMessage { + status_message(self) + } +} + +/// Build a `StatusMessage` representing the state of the given `beacon_chain`. +pub(crate) fn status_message(beacon_chain: &BeaconChain) -> StatusMessage { + let fork_digest = beacon_chain.enr_fork_id().fork_digest; + let cached_head = beacon_chain.canonical_head.cached_head(); + let mut finalized_checkpoint = cached_head.finalized_checkpoint(); + + // Alias the genesis checkpoint root to `0x00`. + let spec = &beacon_chain.spec; + let genesis_epoch = spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()); + if finalized_checkpoint.epoch == genesis_epoch { + finalized_checkpoint.root = Hash256::zero(); + } - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) + StatusMessage { + fork_digest, + finalized_root: finalized_checkpoint.root, + finalized_epoch: finalized_checkpoint.epoch, + head_root: cached_head.head_block_root(), + head_slot: cached_head.head_slot(), } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 581f6b32702..778eb63263c 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime}; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; +use task_executor::test_utils::TestRuntime; use types::{ CommitteeIndex, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, @@ -32,6 +33,7 @@ type TestBeaconChainType = Witness< pub struct TestBeaconChain { chain: Arc>, + _test_runtime: TestRuntime, } impl TestBeaconChain { @@ -46,11 +48,14 @@ impl TestBeaconChain { let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let test_runtime = TestRuntime::default(); + let chain = Arc::new( BeaconChainBuilder::new(MainnetEthSpec) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) + .task_executor(test_runtime.task_executor.clone()) .genesis_state( interop_genesis_state::( &keypairs, @@ -74,7 +79,10 @@ impl TestBeaconChain { .build() .expect("should build"), ); - Self { chain } 
+ Self { + chain, + _test_runtime: test_runtime, + } } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index d6bb802a212..7ff640065ad 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -53,7 +53,7 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let mut hasher = DefaultHasher::new(); @@ -392,7 +392,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 2770171be9a..99df8e4a66c 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -7,6 +7,7 @@ use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; use slog::{crit, debug, error, trace, warn, Logger}; use smallvec::SmallVec; +use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; use tokio::sync::mpsc; @@ -105,7 +106,7 @@ impl BlockLookups { pub fn search_parent( &mut self, - block: Box>, + block: Arc>, peer_id: PeerId, cx: &mut SyncNetworkContext, ) { @@ -129,7 +130,7 @@ impl BlockLookups { return; } - let parent_lookup = ParentLookup::new(*block, peer_id); + let parent_lookup = ParentLookup::new(block, peer_id); self.request_parent(parent_lookup, cx); } @@ -139,7 +140,7 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { @@ -203,7 +204,7 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, cx: &mut SyncNetworkContext, ) { @@ -496,7 +497,7 @@ impl BlockLookups { Err(BlockError::ParentUnknown(block)) => { // need to keep looking for parents // add the block back to the queue and continue the search - parent_lookup.add_block(*block); + parent_lookup.add_block(block); self.request_parent(parent_lookup, cx); } Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => { @@ -618,7 +619,7 @@ impl BlockLookups { fn send_block_for_processing( &mut self, - block: Box>, + block: Arc>, duration: Duration, process_type: BlockProcessType, ) -> Result<(), ()> { diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index a9a3c34bc05..62503353ade 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,4 +1,5 @@ use lighthouse_network::PeerId; +use std::sync::Arc; use store::{EthSpec, Hash256, SignedBeaconBlock}; use strum::IntoStaticStr; @@ -21,7 +22,7 @@ pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, /// Request of the last parent. current_parent_request: SingleBlockRequest, /// Id of the last parent request. 
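The hunks above continue the `Box<SignedBeaconBlock<_>>` to `Arc<SignedBeaconBlock<_>>` migration: the batch hasher, the lookup caches and the processor can now all hold the same downloaded block without deep-copying it. A minimal sketch of why that matters; the `Block` type here is a hypothetical stand-in, not the real `SignedBeaconBlock`:

```rust
use std::sync::Arc;

// Hypothetical stand-in for a large, effectively immutable block.
struct Block {
    payload: Vec<u8>,
}

fn main() {
    let block = Arc::new(Block { payload: vec![0u8; 1 << 20] });

    // With `Box<Block>`, handing the block to sync *and* keeping a copy for
    // a network response would force a megabyte-sized deep copy. With `Arc`,
    // each clone is just a reference-count increment.
    let for_sync = Arc::clone(&block);
    let for_response = Arc::clone(&block);

    assert_eq!(Arc::strong_count(&block), 3);
    assert_eq!(for_sync.payload.len(), for_response.payload.len());
}
```

The trade-off is shared immutability: code that genuinely needs an owned copy must now deep-copy explicitly, as the `block.as_ref().clone()` in the `hot_cold_store.rs` hunk later in this patch does.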
@@ -48,10 +49,10 @@ impl ParentLookup { pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { self.downloaded_blocks .iter() - .any(|d_block| d_block == block) + .any(|d_block| d_block.as_ref() == block) } - pub fn new(block: SignedBeaconBlock, peer_id: PeerId) -> Self { + pub fn new(block: Arc>, peer_id: PeerId) -> Self { let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); Self { @@ -86,7 +87,7 @@ impl ParentLookup { self.current_parent_request.check_peer_disconnected(peer_id) } - pub fn add_block(&mut self, block: SignedBeaconBlock) { + pub fn add_block(&mut self, block: Arc>) { let next_parent = block.parent_root(); self.downloaded_blocks.push(block); self.current_parent_request.hash = next_parent; @@ -108,7 +109,7 @@ impl ParentLookup { self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec> { + pub fn chain_blocks(&mut self) -> Vec>> { std::mem::take(&mut self.downloaded_blocks) } @@ -116,9 +117,9 @@ impl ParentLookup { /// the processing result of the block. pub fn verify_block( &mut self, - block: Option>>, + block: Option>>, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>>, VerifyError> { + ) -> Result>>, VerifyError> { let block = self.current_parent_request.verify_block(block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 347a4ae4377..debf3de8dba 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::sync::Arc; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; @@ -82,8 +83,8 @@ impl SingleBlockRequest { /// Returns the block for processing if the response is what we expected. pub fn verify_block( &mut self, - block: Option>>, - ) -> Result>>, VerifyError> { + block: Option>>, + ) -> Result>>, VerifyError> { match self.state { State::AwaitingDownload => { self.register_failure(); @@ -195,7 +196,7 @@ mod tests { let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); sl.request_block().unwrap(); - sl.verify_block(Some(Box::new(block))).unwrap().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); } #[test] diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index dde7d49953a..e9c8ac8ca74 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -158,7 +158,7 @@ fn test_single_block_lookup_happy_path() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -204,7 +204,7 @@ fn test_single_block_lookup_wrong_response() { // Peer sends something else. It should be penalized. 
let bad_block = rig.rand_block(); - bl.single_block_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); rig.expect_block_request(); // should be retried @@ -243,7 +243,7 @@ fn test_single_block_lookup_becomes_parent_request() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -252,7 +252,7 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed(id, Err(BlockError::ParentUnknown(Box::new(block))), &mut cx); + bl.single_block_processed(id, Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx); assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); @@ -269,11 +269,11 @@ fn test_parent_lookup_happy_path() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id = rig.expect_parent_request(); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); rig.expect_empty_network(); @@ -294,12 +294,12 @@ fn test_parent_lookup_wrong_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends the wrong block, peer should be penalized and the block re-requested. let bad_block = rig.rand_block(); - bl.parent_lookup_response(id1, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); let id2 = rig.expect_parent_request(); @@ -308,7 +308,7 @@ fn test_parent_lookup_wrong_response() { rig.expect_empty_network(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -328,7 +328,7 @@ fn test_parent_lookup_empty_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends an empty response, peer should be penalized and the block re-requested. @@ -337,7 +337,7 @@ fn test_parent_lookup_empty_response() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. 
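The lookup tests in this file all drive the same backwards walk: a block with an unknown parent triggers a request for that parent by root, each response either extends the downloaded chain or earns the peer a penalty and a retry, and once a known ancestor is reached the whole chain is sent for processing oldest-first. A simplified, self-contained model of that walk (`Block`, `walk_parents` and the `u64` roots are illustrative stand-ins, not the Lighthouse API):

```rust
use std::collections::{HashMap, HashSet};

// Illustrative block: identified by a root, pointing at its parent's root.
#[derive(Clone)]
struct Block {
    root: u64,
    parent_root: u64,
}

/// Walk backwards from `tip` until the parent is already known, then return
/// the downloaded chain oldest-first, ready for import.
fn walk_parents(tip: Block, known: &HashSet<u64>, remote: &HashMap<u64, Block>) -> Vec<Block> {
    let mut downloaded = vec![tip];
    loop {
        let parent_root = downloaded.last().expect("non-empty").parent_root;
        if known.contains(&parent_root) {
            break;
        }
        // In the real code this step is a BlocksByRoot request to a peer.
        downloaded.push(remote[&parent_root].clone());
    }
    downloaded.reverse();
    downloaded
}

fn main() {
    let known: HashSet<u64> = [0].into_iter().collect();
    let remote: HashMap<u64, Block> = [
        (1, Block { root: 1, parent_root: 0 }),
        (2, Block { root: 2, parent_root: 1 }),
    ]
    .into_iter()
    .collect();

    let chain = walk_parents(Block { root: 3, parent_root: 2 }, &known, &remote);
    let roots: Vec<u64> = chain.iter().map(|b| b.root).collect();
    assert_eq!(roots, vec![1, 2, 3]); // oldest first, ready for import
}
```

The real implementation additionally bounds the walk with retry and depth limits, which is what the surrounding `PARENT_FAIL_TOLERANCE` and "too deep" tests exercise.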
@@ -357,7 +357,7 @@ fn test_parent_lookup_rpc_failure() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // The request fails. It should be tried again. @@ -365,7 +365,7 @@ fn test_parent_lookup_rpc_failure() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. @@ -385,7 +385,7 @@ fn test_parent_lookup_too_many_attempts() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE + 1 { let id = rig.expect_parent_request(); match i % 2 { @@ -397,7 +397,7 @@ fn test_parent_lookup_too_many_attempts() { _ => { // Send a bad block this time. It should be tried again. let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); } } @@ -427,12 +427,12 @@ fn test_parent_lookup_too_deep() { let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); for block in blocks.into_iter().rev() { let id = rig.expect_parent_request(); // the block - bl.parent_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); // the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); // the processing request @@ -440,7 +440,7 @@ fn test_parent_lookup_too_deep() { // the processing result bl.parent_block_processed( chain_hash, - Err(BlockError::ParentUnknown(Box::new(block))), + Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx, ) } @@ -454,7 +454,7 @@ fn test_parent_lookup_disconnection() { let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let peer_id = PeerId::random(); let trigger_block = rig.rand_block(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 311fbf67c48..3e442566557 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -88,12 +88,12 @@ pub enum SyncMessage { RpcBlock { request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, }, /// A block with an unknown parent has been received. - UnknownBlock(PeerId, Box>), + UnknownBlock(PeerId, Arc>), /// A peer has sent an object that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. @@ -229,17 +229,12 @@ impl SyncManager { /// ours that we consider it fully sync'd with respect to our current chain. 
fn add_peer(&mut self, peer_id: PeerId, remote: SyncInfo) { // ensure the beacon chain still exists - let local = match self.chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; let sync_type = remote_sync_type(&local, &remote, &self.chain); @@ -379,7 +374,7 @@ impl SyncManager { // advanced and will produce a head chain on re-status. Otherwise it will shift // to being synced let mut sync_state = { - let head = self.chain.best_slot().unwrap_or_else(|_| Slot::new(0)); + let head = self.chain.best_slot(); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); let peers = self.network_globals.peers.read(); @@ -482,11 +477,7 @@ impl SyncManager { SyncMessage::UnknownBlock(peer_id, block) => { // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore if !self.network_globals.sync_state.read().is_synced() { - let head_slot = self - .chain - .head_info() - .map(|info| info.slot) - .unwrap_or_else(|_| Slot::from(0u64)); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let unknown_block_slot = block.slot(); // if the block is far in the future, ignore it. If its within the slot tolerance of @@ -571,7 +562,7 @@ impl SyncManager { &mut self, request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, ) { match request_id { @@ -599,7 +590,7 @@ impl SyncManager { batch_id, &peer_id, id, - beacon_block.map(|b| *b), + beacon_block, ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -621,7 +612,7 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block.map(|b| *b), + beacon_block, ); self.update_sync_state(); } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 96bdc533f8d..ffbd1a64da0 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -65,27 +65,26 @@ impl SyncNetworkContext { chain: &C, peers: impl Iterator, ) { - if let Ok(status_message) = chain.status_message() { - for peer_id in peers { - debug!( - self.log, - "Sending Status Request"; - "peer" => %peer_id, - "fork_digest" => ?status_message.fork_digest, - "finalized_root" => ?status_message.finalized_root, - "finalized_epoch" => ?status_message.finalized_epoch, - "head_root" => %status_message.head_root, - "head_slot" => %status_message.head_slot, - ); + let status_message = chain.status_message(); + for peer_id in peers { + debug!( + self.log, + "Sending Status Request"; + "peer" => %peer_id, + "fork_digest" => ?status_message.fork_digest, + "finalized_root" => ?status_message.finalized_root, + "finalized_epoch" => ?status_message.finalized_epoch, + "head_root" => %status_message.head_root, + "head_slot" => %status_message.head_slot, + ); - let request = Request::Status(status_message.clone()); - let request_id = RequestId::Router; - let _ = self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - }); - } + let 
request = Request::Status(status_message.clone()); + let request_id = RequestId::Router; + let _ = self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + }); } } diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index ed3f07763cd..c01366f1be9 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -59,7 +59,7 @@ pub fn remote_sync_type( if remote.head_slot < near_range_start { PeerSyncType::Behind } else if remote.head_slot > near_range_end - && !chain.fork_choice.read().contains_block(&remote.head_root) + && !chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer has a head ahead enough of ours and we have no knowledge of their best // block. @@ -74,7 +74,7 @@ pub fn remote_sync_type( if (local.finalized_epoch + 1 == remote.finalized_epoch && near_range_start <= remote.head_slot && remote.head_slot <= near_range_end) - || chain.fork_choice.read().contains_block(&remote.head_root) + || chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer is near enough to us to be considered synced, or // we have already synced up to this peer's head diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 447f0bd11cc..c642d81db8c 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -4,6 +4,7 @@ use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; +use std::sync::Arc; use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; /// The number of times to retry a batch before it is considered failed. @@ -46,7 +47,7 @@ pub trait BatchConfig { /// Note that simpler hashing functions considered in the past (hash of first block, hash of last /// block, number of received blocks) are not good enough to differentiate attempts. For this /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; + fn batch_attempt_hash(blocks: &[Arc>]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -58,7 +59,7 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); blocks.hash(&mut hasher); hasher.finish() @@ -100,9 +101,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>, Id), + Downloading(PeerId, Vec>>, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>), + AwaitingProcessing(PeerId, Vec>>), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -211,7 +212,7 @@ impl BatchInfo { } /// Adds a block to a downloading batch. 
- pub fn add_block(&mut self, block: SignedBeaconBlock) -> Result<(), WrongState> { + pub fn add_block(&mut self, block: Arc>) -> Result<(), WrongState> { match self.state.poison() { BatchState::Downloading(peer, mut blocks, req_id) => { blocks.push(block); @@ -337,7 +338,7 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>, WrongState> { + pub fn start_processing(&mut self) -> Result>>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); @@ -436,7 +437,10 @@ pub struct Attempt { } impl Attempt { - fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + fn new( + peer_id: PeerId, + blocks: &[Arc>], + ) -> Self { let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs index 5f8033bc51e..df49543a6b6 100644 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ b/beacon_node/network/src/sync/range_sync/block_storage.rs @@ -8,6 +8,6 @@ pub trait BlockStorage { impl BlockStorage for BeaconChain { fn is_block_known(&self, block_root: &Hash256) -> bool { - self.fork_choice.read().contains_block(block_root) + self.block_is_known_to_fork_choice(block_root) } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 0f5d63ea6d1..ef5ba23e665 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -9,6 +9,7 @@ use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; +use std::sync::Arc; use tokio::sync::mpsc::Sender; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -216,7 +217,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9953df81d09..f08f8eb82a5 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -53,7 +53,7 @@ use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, trace, warn}; +use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; @@ -221,7 +221,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { @@ -365,17 +365,12 @@ where network.status_peers(self.beacon_chain.as_ref(), chain.peers()); - let local = match self.beacon_chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.beacon_chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: 
status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; // update the state of the collection @@ -447,8 +442,8 @@ mod tests { } impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> Result { - Ok(self.status.read().clone()) + fn status_message(&self) -> StatusMessage { + self.status.read().clone() } } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 84d23a45626..6b8b8eb145b 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -21,3 +21,4 @@ store = { path = "../store" } [dev-dependencies] beacon_chain = { path = "../beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 70eb31cd0fb..771dca12f69 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -710,7 +710,7 @@ mod release_tests { } /// Test state for sync contribution-related tests. - fn sync_contribution_test_state( + async fn sync_contribution_test_state( num_committees: usize, ) -> (BeaconChainHarness>, ChainSpec) { let mut spec = E::default_spec(); @@ -722,12 +722,14 @@ mod release_tests { let harness = get_harness::(num_validators, Some(spec.clone())); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1)], - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1)], + (0..num_validators).collect::>().as_slice(), + ) + .await; (harness, spec) } @@ -1454,9 +1456,9 @@ mod release_tests { } /// End-to-end test of basic sync contribution handling. - #[test] - fn sync_contribution_aggregation_insert_get_prune() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_aggregation_insert_get_prune() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1514,9 +1516,9 @@ mod release_tests { } /// Adding a sync contribution already in the pool should not increase the size of the pool. - #[test] - fn sync_contribution_duplicate() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_duplicate() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1551,9 +1553,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with more bits set should increase the /// number of bits set in the aggregate. - #[test] - fn sync_contribution_with_more_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_more_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1631,9 +1633,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with fewer bits set should not increase the /// number of bits set in the aggregate. 
- #[test] - fn sync_contribution_with_fewer_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_fewer_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index fe66a176b67..e66cee6fdeb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -669,7 +669,11 @@ impl, Cold: ItemStore> HotColdDB for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - self.block_as_kv_store_ops(&block_root, *block, &mut key_value_batch)?; + self.block_as_kv_store_ops( + &block_root, + block.as_ref().clone(), + &mut key_value_batch, + )?; } StoreOp::PutState(state_root, state) => { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 613c2e416ca..364bda2cc40 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -39,6 +39,7 @@ pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; +use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; @@ -152,7 +153,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Reified key-value storage operation. Helps in modifying the storage atomically. /// See also https://github.com/sigp/lighthouse/issues/692 pub enum StoreOp<'a, E: EthSpec> { - PutBlock(Hash256, Box>), + PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index bf2acaf5bb5..944846c863b 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,7 +3,7 @@ //! This service allows task execution on the beacon node for various functionality. 
use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{debug, info, warn}; +use slog::{info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::time::sleep; @@ -13,11 +13,8 @@ pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, ) -> Result<(), &'static str> { - let log = executor.log(); - let per_slot_executor = executor.clone(); - + let log = executor.log().clone(); let timer_future = async move { - let log = per_slot_executor.log().clone(); loop { let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { Some(duration) => duration, @@ -28,31 +25,12 @@ pub fn spawn_timer( }; sleep(duration_to_next_slot).await; - - let chain = beacon_chain.clone(); - if let Some(handle) = per_slot_executor - .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") - { - if let Err(e) = handle.await { - warn!( - log, - "Per slot task failed"; - "info" => ?e - ); - } - } else { - debug!( - log, - "Per slot task timer stopped"; - "info" => "shutting down" - ); - break; - } + beacon_chain.per_slot_task().await; } }; executor.spawn(timer_future, "timer"); - info!(log, "Timer service started"); + info!(executor.log(), "Timer service started"); Ok(()) } diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index f344dc47354..08bb565870d 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index dd525bea504..6bf4cc8e083 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -7,6 +7,8 @@ use slog::{crit, debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; +pub use tokio::task::JoinHandle; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -312,6 +314,61 @@ impl TaskExecutor { Some(future) } + /// Block the current (non-async) thread on the completion of some future. + /// + /// ## Warning + /// + /// This method is "dangerous" since calling it from an async thread will result in a panic! Any + /// use of this outside of testing should be very deeply considered as Lighthouse has been + /// burned by this function in the past. + /// + /// Determining what is an "async thread" is rather challenging; just because a function isn't + /// marked as `async` doesn't mean it's not being called from an `async` function or there isn't + /// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to + /// @paulhauner if you plan to use this function in production. He has put metrics in here to + /// track any use of it, so don't think you can pull a sneaky one on him. + pub fn block_on_dangerous( + &self, + future: F, + name: &'static str, + ) -> Option { + let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); + metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + let log = self.log.clone(); + let handle = self.handle()?; + let exit = self.exit.clone(); + + debug!( + log, + "Starting block_on task"; + "name" => name + ); + + handle.block_on(async { + let output = tokio::select! 
{ + output = future => { + debug!( + log, + "Completed block_on task"; + "name" => name + ); + Some(output) + }, + _ = exit => { + debug!( + log, + "Cancelled block_on task"; + "name" => name, + ); + None + } + }; + metrics::dec_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + drop(timer); + output + }) + } + /// Returns a `Handle` to the current runtime. pub fn handle(&self) -> Option { self.handle_provider.handle() diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index ead5925b6e8..6ecea86d653 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -18,6 +18,16 @@ lazy_static! { "Time taken by blocking tasks", &["blocking_task_hist"] ); + pub static ref BLOCK_ON_TASKS_COUNT: Result = try_create_int_gauge_vec( + "block_on_tasks_count", + "Total number of block_on_dangerous tasks spawned", + &["name"] + ); + pub static ref BLOCK_ON_TASKS_HISTOGRAM: Result = try_create_histogram_vec( + "block_on_tasks_histogram", + "Time taken by block_on_dangerous tasks", + &["name"] + ); pub static ref TASKS_HISTOGRAM: Result = try_create_histogram_vec( "async_tasks_time_histogram", "Time taken by async tasks", diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 77603d09e65..429ab1b8c57 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -15,3 +15,4 @@ eth2_ssz_derive = "0.3.0" [dev-dependencies] beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 49510e7326b..7390ce7f945 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -5,7 +5,7 @@ use std::cmp::Ordering; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; @@ -248,6 +248,7 @@ fn dequeue_attestations( /// Equivalent to the `is_from_block` `bool` in: /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation +#[derive(Clone, Copy)] pub enum AttestationFromBlock { True, False, @@ -261,6 +262,13 @@ pub struct ForkchoiceUpdateParameters { pub finalized_hash: Option, } +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct ForkChoiceView { + pub head_block_root: Hash256, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, +} + /// Provides an implementation of "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice @@ -279,7 +287,9 @@ pub struct ForkChoice { /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec, /// Stores a cache of the values required to be sent to the execution layer. - forkchoice_update_parameters: Option, + forkchoice_update_parameters: ForkchoiceUpdateParameters, + /// The most recent result of running `Self::get_head`. 
+    head_block_root: Hash256,
     _phantom: PhantomData<E>,
 }
@@ -306,6 +316,8 @@ where
         anchor_block_root: Hash256,
         anchor_block: &SignedBeaconBlock<E>,
         anchor_state: &BeaconState<E>,
+        current_slot: Option<Slot>,
+        spec: &ChainSpec,
     ) -> Result<Self, Error<T::Error>> {
         // Sanity check: the anchor must lie on an epoch boundary.
         if anchor_block.slot() % E::slots_per_epoch() != 0 {
@@ -340,6 +352,9 @@ where
             },
         );
 
+        // If the current slot is not provided, use the value that was last provided to the store.
+        let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot());
+
         let proto_array = ProtoArrayForkChoice::new(
             finalized_block_slot,
             finalized_block_state_root,
@@ -350,15 +365,28 @@ where
             execution_status,
         )?;
 
-        Ok(Self {
+        let mut fork_choice = Self {
             fc_store,
             proto_array,
             queued_attestations: vec![],
-            forkchoice_update_parameters: None,
+            // This will be updated during the next call to `Self::get_head`.
+            forkchoice_update_parameters: ForkchoiceUpdateParameters {
+                head_hash: None,
+                finalized_hash: None,
+                head_root: Hash256::zero(),
+            },
+            // This will be updated during the next call to `Self::get_head`.
+            head_block_root: Hash256::zero(),
             _phantom: PhantomData,
-        })
+        };
+
+        // Ensure that `fork_choice.head_block_root` is updated.
+        fork_choice.get_head(current_slot, spec)?;
+
+        Ok(fork_choice)
     }
 
+    /*
     /// Instantiates `Self` from some existing components.
     ///
     /// This is useful if the existing components have been loaded from disk after a process
     ///
@@ -376,13 +404,13 @@ where
             _phantom: PhantomData,
         }
     }
+    */
 
     /// Returns cached information that can be used to issue a `forkchoiceUpdated` message to an
     /// execution engine.
     ///
-    /// These values are updated each time `Self::get_head` is called. May return `None` if
-    /// `Self::get_head` has not yet been called.
-    pub fn get_forkchoice_update_parameters(&self) -> Option<ForkchoiceUpdateParameters> {
+    /// These values are updated each time `Self::get_head` is called.
+    pub fn get_forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters {
         self.forkchoice_update_parameters
     }
@@ -455,6 +483,8 @@ where
             spec,
         )?;
 
+        self.head_block_root = head_root;
+
         // Cache some values for the next forkchoiceUpdate call to the execution layer.
         let head_hash = self
             .get_block(&head_root)
@@ -463,15 +493,35 @@ where
         let finalized_hash = self
             .get_block(&finalized_root)
             .and_then(|b| b.execution_status.block_hash());
-        self.forkchoice_update_parameters = Some(ForkchoiceUpdateParameters {
+        self.forkchoice_update_parameters = ForkchoiceUpdateParameters {
             head_root,
             head_hash,
             finalized_hash,
-        });
+        };
 
         Ok(head_root)
     }
 
+    /// Return information about:
+    ///
+    /// - The LMD head of the chain.
+    /// - The FFG checkpoints.
+    ///
+    /// The information is "cached" since the last call to `Self::get_head`.
+    ///
+    /// ## Notes
+    ///
+    /// The finalized/justified checkpoints are determined from the fork choice store. Therefore,
+    /// it's possible that the state corresponding to `get_state(get_block(head_block_root))` will
+    /// have *differing* finalized and justified information.
+    pub fn cached_fork_choice_view(&self) -> ForkChoiceView {
+        ForkChoiceView {
+            head_block_root: self.head_block_root,
+            justified_checkpoint: self.justified_checkpoint(),
+            finalized_checkpoint: self.finalized_checkpoint(),
+        }
+    }
+
     /// Returns `true` if the given `store` should be updated to set
     /// `state.current_justified_checkpoint` as its `justified_checkpoint`.
     ///
@@ -566,7 +616,7 @@
     pub fn on_block<Payload: ExecPayload<E>>(
         &mut self,
         current_slot: Slot,
-        block: &BeaconBlock<E, Payload>,
+        block: BeaconBlockRef<E, Payload>,
         block_root: Hash256,
         block_delay: Duration,
         state: &BeaconState<E>,
@@ -966,6 +1016,11 @@ where
         }
     }
 
+    /// Returns the weight for the given block root.
+    pub fn get_block_weight(&self, block_root: &Hash256) -> Option<u64> {
+        self.proto_array.get_weight(block_root)
+    }
+
     /// Returns the `ProtoBlock` for the justified checkpoint.
     ///
     /// ## Notes
@@ -995,6 +1050,39 @@ where
             .is_descendant(self.fc_store.finalized_checkpoint().root, block_root)
     }
 
+    /// Returns `Ok(true)` if `block_root` has been imported optimistically. That is, the
+    /// execution payload has not been verified.
+    ///
+    /// Returns `Ok(false)` if `block_root`'s execution payload has been verified, if it is a
+    /// pre-Bellatrix block or if it is before the PoW terminal block.
+    ///
+    /// In the case where the block could not be found in fork-choice, it returns the
+    /// `execution_status` of the current finalized block.
+    ///
+    /// This function assumes the `block_root` exists.
+    pub fn is_optimistic_block(&self, block_root: &Hash256) -> Result<bool, Error<T::Error>> {
+        if let Some(status) = self.get_block_execution_status(block_root) {
+            Ok(status.is_optimistic())
+        } else {
+            Ok(self.get_finalized_block()?.execution_status.is_optimistic())
+        }
+    }
+
+    /// The same as `is_optimistic_block` but does not fall back to `self.get_finalized_block`
+    /// when the block cannot be found.
+    ///
+    /// Intended to be used when checking if the head has been imported optimistically.
+    pub fn is_optimistic_block_no_fallback(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<bool, Error<T::Error>> {
+        if let Some(status) = self.get_block_execution_status(block_root) {
+            Ok(status.is_optimistic())
+        } else {
+            Err(Error::MissingProtoArrayBlock(*block_root))
+        }
+    }
+
     /// Returns `Ok(false)` if a block is not viable to be imported optimistically.
     ///
     /// ## Notes
@@ -1109,17 +1197,31 @@ where
     pub fn from_persisted(
         persisted: PersistedForkChoice,
         fc_store: T,
+        spec: &ChainSpec,
     ) -> Result<Self, Error<T::Error>> {
         let proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes)
             .map_err(Error::InvalidProtoArrayBytes)?;
 
-        Ok(Self {
+        let current_slot = fc_store.get_current_slot();
+
+        let mut fork_choice = Self {
             fc_store,
             proto_array,
             queued_attestations: persisted.queued_attestations,
-            forkchoice_update_parameters: None,
+            // Will be updated in the following call to `Self::get_head`.
+            forkchoice_update_parameters: ForkchoiceUpdateParameters {
+                head_hash: None,
+                finalized_hash: None,
+                head_root: Hash256::zero(),
+            },
+            // Will be updated in the following call to `Self::get_head`.
+            head_block_root: Hash256::zero(),
             _phantom: PhantomData,
-        })
+        };
+
+        fork_choice.get_head(current_slot, spec)?;
+
+        Ok(fork_choice)
     }
 
     /// Takes a snapshot of `Self` and stores it in `PersistedForkChoice`, allowing this struct to
diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs
index 78260075160..6df0cbc2c26 100644
--- a/consensus/fork_choice/src/fork_choice_store.rs
+++ b/consensus/fork_choice/src/fork_choice_store.rs
@@ -1,4 +1,4 @@
-use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot};
+use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot};
 
 /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice":
 ///
 /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice
@@ -33,7 +33,7 @@ pub trait ForkChoiceStore<E: EthSpec>: Sized {
     /// choice. Allows the implementer to perform caching or other housekeeping duties.
    fn on_verified_block<Payload: ExecPayload<T>>(
         &mut self,
-        block: &BeaconBlock<T, Payload>,
+        block: BeaconBlockRef<T, Payload>,
         block_root: Hash256,
         state: &BeaconState<T>,
     ) -> Result<(), Self::Error>;
diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs
index 157306dd5f8..6f79b488dd6 100644
--- a/consensus/fork_choice/src/lib.rs
+++ b/consensus/fork_choice/src/lib.rs
@@ -2,8 +2,9 @@ mod fork_choice;
 mod fork_choice_store;

 pub use crate::fork_choice::{
-    AttestationFromBlock, Error, ForkChoice, InvalidAttestation, InvalidBlock,
-    PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation,
+    AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters,
+    InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice,
+    QueuedAttestation,
 };
 pub use fork_choice_store::ForkChoiceStore;
 pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};
diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs
index 3f8a2ac6b6b..2d10319cf06 100644
--- a/consensus/fork_choice/tests/tests.rs
+++ b/consensus/fork_choice/tests/tests.rs
@@ -16,9 +16,8 @@ use fork_choice::{
 };
 use store::MemoryStore;
 use types::{
-    test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState,
-    ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, Slot,
-    SubnetId,
+    test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint,
+    Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId,
 };

 pub type E = MainnetEthSpec;
@@ -74,7 +73,14 @@ impl ForkChoiceTest {
     where
         T: Fn(&BeaconForkChoiceStore<E, MemoryStore<E>, MemoryStore<E>>) -> U,
     {
-        func(&self.harness.chain.fork_choice.read().fc_store())
+        func(
+            &self
+                .harness
+                .chain
+                .canonical_head
+                .fork_choice_read_lock()
+                .fc_store(),
+        )
     }

     /// Assert the epochs match.
@@ -109,15 +115,7 @@ impl ForkChoiceTest {
     /// Assert the finalized epoch is less than the given epoch.
     pub fn assert_finalized_epoch_is_less_than(self, epoch: Epoch) -> Self {
-        assert!(
-            self.harness
-                .chain
-                .head_info()
-                .unwrap()
-                .finalized_checkpoint
-                .epoch
-                < epoch
-        );
+        assert!(self.harness.finalized_checkpoint().epoch < epoch);
         self
     }

@@ -150,11 +148,17 @@ impl ForkChoiceTest {
     {
         self.harness
             .chain
-            .fork_choice
-            .write()
+            .canonical_head
+            .fork_choice_write_lock()
             .update_time(self.harness.chain.slot().unwrap())
             .unwrap();
-        func(self.harness.chain.fork_choice.read().queued_attestations());
+        func(
+            self.harness
+                .chain
+                .canonical_head
+                .fork_choice_read_lock()
+                .queued_attestations(),
+        );
         self
     }

@@ -173,7 +177,7 @@ impl ForkChoiceTest {
     }

     /// Build the chain whilst `predicate` returns `true` and `process_block_result` does not error.
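Many of the test changes below are mechanical consequences of fork choice moving behind the beacon chain's `canonical_head`, which hands out explicit read/write guards instead of exposing a bare lock field. A minimal sketch of that accessor pattern, assuming a plain `std::sync::RwLock` (the real lock type may differ):

```rust
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

struct ForkChoice {
    queued_attestations: Vec<u64>,
}

struct CanonicalHead {
    fork_choice: RwLock<ForkChoice>,
}

impl CanonicalHead {
    // Named accessors make the lock acquisition, and its scope, explicit
    // at every call site.
    fn fork_choice_read_lock(&self) -> RwLockReadGuard<'_, ForkChoice> {
        self.fork_choice.read().expect("lock poisoned")
    }

    fn fork_choice_write_lock(&self) -> RwLockWriteGuard<'_, ForkChoice> {
        self.fork_choice.write().expect("lock poisoned")
    }
}

fn main() {
    let head = CanonicalHead {
        fork_choice: RwLock::new(ForkChoice {
            queued_attestations: vec![],
        }),
    };
    head.fork_choice_write_lock().queued_attestations.push(1);
    assert_eq!(head.fork_choice_read_lock().queued_attestations.len(), 1);
}
```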
- pub fn apply_blocks_while(self, mut predicate: F) -> Result + pub async fn apply_blocks_while(self, mut predicate: F) -> Result where F: FnMut(BeaconBlockRef<'_, E>, &BeaconState) -> bool, { @@ -182,12 +186,12 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); - let (block, state_) = self.harness.make_block(state, slot); + let (block, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block.message(), &state) { break; } - if let Ok(block_hash) = self.harness.process_block_result(block.clone()) { + if let Ok(block_hash) = self.harness.process_block_result(block.clone()).await { self.harness.attest_block( &state, block.state_root(), @@ -205,25 +209,29 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). - pub fn apply_blocks(self, count: usize) -> Self { + pub async fn apply_blocks(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; self } /// Apply `count` blocks to the chain (without attestations). - pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; self } @@ -256,9 +264,9 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. - pub fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self + pub async fn apply_block_directly_to_fork_choice(self, mut func: F) -> Self where - F: FnMut(&mut BeaconBlock, &mut BeaconState), + F: FnMut(&mut SignedBeaconBlock, &mut BeaconState), { let state = self .harness @@ -269,18 +277,17 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -293,13 +300,13 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts that an error occurred and allows inspecting it via `comparison_func`. 
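The bulk of this test diff is a sync-to-async migration: harness helpers become `async fn`s, every call in a builder chain gains an `.await`, and `#[test]` becomes `#[tokio::test]`. A toy reduction of the pattern, assuming a tokio dev-dependency with the `macros` and `rt` features enabled:

```rust
struct Tester(u64);

impl Tester {
    // Stands in for helpers like `apply_blocks`, which now await block
    // production on the async harness.
    async fn apply_blocks(self, count: u64) -> Self {
        Tester(self.0 + count)
    }

    fn assert_block_count(self, expected: u64) -> Self {
        assert_eq!(self.0, expected);
        self
    }
}

#[tokio::test]
async fn builder_chain_with_awaits() {
    // Each async step needs its own `.await` before the chain continues,
    // which is exactly the shape of the diffs in this file.
    Tester(0)
        .apply_blocks(2)
        .await
        .assert_block_count(2)
        .apply_blocks(1)
        .await
        .assert_block_count(3);
}
```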
- pub fn apply_invalid_block_directly_to_fork_choice( + pub async fn apply_invalid_block_directly_to_fork_choice( self, mut mutation_func: F, mut comparison_func: G, ) -> Self where - F: FnMut(&mut BeaconBlock, &mut BeaconState), + F: FnMut(&mut SignedBeaconBlock, &mut BeaconState), G: FnMut(ForkChoiceError), { let state = self @@ -311,19 +318,18 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - mutation_func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + mutation_func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, @@ -339,7 +345,7 @@ impl ForkChoiceTest { /// database. fn check_justified_balances(&self) { let harness = &self.harness; - let fc = self.harness.chain.fork_choice.read(); + let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); let state_root = harness .chain @@ -377,7 +383,7 @@ impl ForkChoiceTest { /// Returns an attestation that is valid for some slot in the given `chain`. /// /// Also returns some info about who created it. - fn apply_attestation_to_chain( + async fn apply_attestation_to_chain( self, delay: MutationDelay, mut mutation_func: F, @@ -387,7 +393,7 @@ impl ForkChoiceTest { F: FnMut(&mut IndexedAttestation, &BeaconChain>), G: FnMut(Result<(), BeaconChainError>), { - let head = self.harness.chain.head().expect("should get head"); + let head = self.harness.chain.head_snapshot(); let current_slot = self.harness.chain.slot().expect("should get slot"); let mut attestation = self @@ -438,11 +444,13 @@ impl ForkChoiceTest { if let MutationDelay::Blocks(slots) = delay { self.harness.advance_slot(); - self.harness.extend_chain( - slots, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; } mutation_func( @@ -464,17 +472,9 @@ impl ForkChoiceTest { pub fn check_finalized_block_is_accessible(self) -> Self { self.harness .chain - .fork_choice - .write() - .get_block( - &self - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .root, - ) + .canonical_head + .fork_choice_read_lock() + .get_block(&self.harness.finalized_checkpoint().root) .unwrap(); self @@ -488,7 +488,7 @@ fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); - let fork_choice = tester.harness.chain.fork_choice.read(); + let fork_choice = tester.harness.chain.canonical_head.fork_choice_read_lock(); let justified_checkpoint = fork_choice.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 0); @@ -503,44 +503,50 @@ fn justified_and_finalized_blocks() { /// - The new justified checkpoint descends from the current. 
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -#[test] -fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis -#[test] -fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) + .await .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) .assert_best_justified_epoch(2) .apply_blocks(1) + .await .assert_justified_epoch(3); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is the first justification since genesis -#[test] -fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { +#[tokio::test] +async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) .assert_best_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(2); } @@ -548,12 +554,14 @@ fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has **not** increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -567,6 +575,7 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } @@ -574,12 +583,14 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. 
-#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -593,6 +604,7 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(3); } @@ -600,12 +612,14 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -619,17 +633,20 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_fina .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } /// Check that the balances are obtained correctly. -#[test] -fn justified_balances() { +#[tokio::test] +async fn justified_balances() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_justified_epoch(2) .check_justified_balances() } @@ -648,15 +665,16 @@ macro_rules! assert_invalid_block { /// Specification v0.12.1 /// /// assert block.parent_root in store.block_states -#[test] -fn invalid_block_unknown_parent() { +#[tokio::test] +async fn invalid_block_unknown_parent() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.parent_root_mut() = junk; + *block.message_mut().parent_root_mut() = junk; }, |err| { assert_invalid_block!( @@ -665,36 +683,42 @@ fn invalid_block_unknown_parent() { if parent == junk ) }, - ); + ) + .await; } /// Specification v0.12.1 /// /// assert get_current_slot(store) >= block.slot -#[test] -fn invalid_block_future_slot() { +#[tokio::test] +async fn invalid_block_future_slot() { ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() += 1; + *block.message_mut().slot_mut() += 1; }, |err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. 
}), - ); + ) + .await; } /// Specification v0.12.1 /// /// assert block.slot > finalized_slot -#[test] -fn invalid_block_finalized_slot() { +#[tokio::test] +async fn invalid_block_finalized_slot() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; + *block.message_mut().slot_mut() = + Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; }, |err| { assert_invalid_block!( @@ -703,7 +727,8 @@ fn invalid_block_finalized_slot() { if finalized_slot == Epoch::new(2).start_slot(E::slots_per_epoch()) ) }, - ); + ) + .await; } /// Specification v0.12.1 @@ -714,18 +739,20 @@ fn invalid_block_finalized_slot() { /// Note: we technically don't do this exact check, but an equivalent check. Reference: /// /// https://github.com/ethereum/eth2.0-specs/pull/1884 -#[test] -fn invalid_block_finalized_descendant() { +#[tokio::test] +async fn invalid_block_finalized_descendant() { let invalid_ancestor = Mutex::new(Hash256::zero()); ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) .apply_invalid_block_directly_to_fork_choice( |block, state| { - *block.parent_root_mut() = *state + *block.message_mut().parent_root_mut() = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); *invalid_ancestor.lock().unwrap() = block.parent_root(); @@ -737,7 +764,8 @@ fn invalid_block_finalized_descendant() { if block_ancestor == Some(*invalid_ancestor.lock().unwrap()) ) }, - ); + ) + .await; } macro_rules! assert_invalid_attestation { @@ -754,23 +782,26 @@ macro_rules! assert_invalid_attestation { } /// Ensure we can process a valid attestation. -#[test] -fn valid_attestation() { +#[tokio::test] +async fn valid_attestation() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), - ); + ) + .await; } /// This test is not in the specification, however we reject an attestation with an empty /// aggregation bitfield since it has no purpose beyond wasting our time. 
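The attestation tests that follow all share one shape: a mutation closure corrupts the attestation before processing, and a comparison closure asserts on the exact error variant. A stripped-down sketch of that shape (stand-in types, not the real harness):

```rust
#[derive(Debug, PartialEq)]
enum InvalidAttestation {
    EmptyAggregationBitfield,
}

fn apply_attestation<F, G>(mut mutation_func: F, mut comparison_func: G)
where
    F: FnMut(&mut Vec<bool>),
    G: FnMut(Result<(), InvalidAttestation>),
{
    // A stand-in "attestation" with a single set aggregation bit.
    let mut aggregation_bits = vec![true];
    mutation_func(&mut aggregation_bits);

    // A stand-in validity rule: at least one bit must be set.
    let result = if aggregation_bits.iter().any(|bit| *bit) {
        Ok(())
    } else {
        Err(InvalidAttestation::EmptyAggregationBitfield)
    };
    comparison_func(result);
}

#[test]
fn rejects_empty_bitfield() {
    apply_attestation(
        |bits| bits.iter_mut().for_each(|bit| *bit = false),
        |result| assert_eq!(result, Err(InvalidAttestation::EmptyAggregationBitfield)),
    );
}
```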
-#[test] -fn invalid_attestation_empty_bitfield() { +#[tokio::test] +async fn invalid_attestation_empty_bitfield() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -779,7 +810,8 @@ fn invalid_attestation_empty_bitfield() { |result| { assert_invalid_attestation!(result, InvalidAttestation::EmptyAggregationBitfield) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -787,10 +819,11 @@ fn invalid_attestation_empty_bitfield() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch after current epoch) -#[test] -fn invalid_attestation_future_epoch() { +#[tokio::test] +async fn invalid_attestation_future_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -803,7 +836,8 @@ fn invalid_attestation_future_epoch() { if attestation_epoch == Epoch::new(2) && current_epoch == Epoch::new(0) ) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -811,10 +845,11 @@ fn invalid_attestation_future_epoch() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch prior to previous epoch) -#[test] -fn invalid_attestation_past_epoch() { +#[tokio::test] +async fn invalid_attestation_past_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize * 3 + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -827,16 +862,18 @@ fn invalid_attestation_past_epoch() { if attestation_epoch == Epoch::new(0) && current_epoch == Epoch::new(3) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.epoch == compute_epoch_at_slot(attestation.data.slot) -#[test] -fn invalid_attestation_target_epoch() { +#[tokio::test] +async fn invalid_attestation_target_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -849,18 +886,20 @@ fn invalid_attestation_target_epoch() { if target == Epoch::new(1) && slot == Slot::new(1) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root in store.blocks -#[test] -fn invalid_attestation_unknown_target_root() { +#[tokio::test] +async fn invalid_attestation_unknown_target_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -873,18 +912,20 @@ fn invalid_attestation_unknown_target_root() { if root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert attestation.data.beacon_block_root in store.blocks -#[test] -fn invalid_attestation_unknown_beacon_block_root() { +#[tokio::test] +async fn invalid_attestation_unknown_beacon_block_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -897,16 +938,18 @@ fn invalid_attestation_unknown_beacon_block_root() { if beacon_block_root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot -#[test] -fn invalid_attestation_future_block() { +#[tokio::test] +async fn invalid_attestation_future_block() { ForkChoiceTest::new() 
.apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::Blocks(1), |attestation, chain| { @@ -923,19 +966,21 @@ fn invalid_attestation_future_block() { if block == 2 && attestation == 1 ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) -#[test] -fn invalid_attestation_inconsistent_ffg_vote() { +#[tokio::test] +async fn invalid_attestation_inconsistent_ffg_vote() { let local_opt = Mutex::new(None); let attestation_opt = Mutex::new(None); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, chain| { @@ -962,22 +1007,25 @@ fn invalid_attestation_inconsistent_ffg_vote() { && local == local_opt.lock().unwrap().unwrap() ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert get_current_slot(store) >= attestation.data.slot + 1 -#[test] -fn invalid_attestation_delayed_slot() { +#[tokio::test] +async fn invalid_attestation_delayed_slot() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)) .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), ) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 1)) .skip_slot() .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)); @@ -985,10 +1033,11 @@ fn invalid_attestation_delayed_slot() { /// Tests that the correct target root is used when the attested-to block is in a prior epoch to /// the attestation. -#[test] -fn valid_attestation_skip_across_epoch() { +#[tokio::test] +async fn valid_attestation_skip_across_epoch() { ForkChoiceTest::new() .apply_blocks(E::slots_per_epoch() as usize - 1) + .await .skip_slots(2) .apply_attestation_to_chain( MutationDelay::NoDelay, @@ -999,15 +1048,18 @@ fn valid_attestation_skip_across_epoch() { ) }, |result| result.unwrap(), - ); + ) + .await; } -#[test] -fn can_read_finalized_block() { +#[tokio::test] +async fn can_read_finalized_block() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .check_finalized_block_is_accessible(); } @@ -1025,8 +1077,8 @@ fn weak_subjectivity_fail_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config); } -#[test] -fn weak_subjectivity_pass_on_startup() { +#[tokio::test] +async fn weak_subjectivity_pass_on_startup() { let epoch = Epoch::new(0); let root = Hash256::zero(); @@ -1037,23 +1089,21 @@ fn weak_subjectivity_pass_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config) .apply_blocks(E::slots_per_epoch() as usize) + .await .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_passes() { +#[tokio::test] +async fn weak_subjectivity_check_passes() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let checkpoint = setup_harness.harness.finalized_checkpoint(); let chain_config = ChainConfig { weak_subjectivity_checkpoint: Some(checkpoint), @@ -1062,26 +1112,25 @@ fn weak_subjectivity_check_passes() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| 
state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_fails_early_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_early_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch - 1; @@ -1092,25 +1141,23 @@ fn weak_subjectivity_check_fails_early_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_late_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_late_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch + 1; @@ -1121,25 +1168,23 @@ fn weak_subjectivity_check_fails_late_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 4) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_incorrect_root() { +#[tokio::test] +async fn weak_subjectivity_check_fails_incorrect_root() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.root = Hash256::zero(); @@ -1150,27 +1195,31 @@ fn weak_subjectivity_check_fails_incorrect_root() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // the checkpoint at epoch 4 should become the root of last block of epoch 2 @@ 
-1187,31 +1236,37 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() {
     // recreate the chain exactly
     ForkChoiceTest::new_with_chain_config(chain_config.clone())
         .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
+        .await
         .unwrap()
         .skip_slots(E::slots_per_epoch() as usize)
         .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5)
+        .await
         .unwrap()
         .apply_blocks(1)
+        .await
         .assert_finalized_epoch(5)
         .assert_shutdown_signal_not_sent();
 }

-#[test]
-fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
+#[tokio::test]
+async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
     let setup_harness = ForkChoiceTest::new()
         // first two epochs
         .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
+        .await
         .unwrap();

     // get the head, it will become the finalized root of epoch 4
-    let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root;
+    let checkpoint_root = setup_harness.harness.head_block_root();

     setup_harness
         // epoch 3 will be entirely skip slots
         .skip_slots(E::slots_per_epoch() as usize)
         .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5)
+        .await
         .unwrap()
         .apply_blocks(1)
+        .await
         .assert_finalized_epoch(5);

     // Invalid checkpoint (epoch too early)
@@ -1228,9 +1283,11 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
     // recreate the chain exactly
     ForkChoiceTest::new_with_chain_config(chain_config.clone())
         .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
+        .await
         .unwrap()
         .skip_slots(E::slots_per_epoch() as usize)
         .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 6)
+        .await
         .unwrap_err()
         .assert_finalized_epoch_is_less_than(checkpoint.epoch)
         .assert_shutdown_signal_sent();
diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs
index acdb42897aa..22d457ca3e3 100644
--- a/consensus/proto_array/src/proto_array.rs
+++ b/consensus/proto_array/src/proto_array.rs
@@ -16,6 +16,7 @@ four_byte_option_impl!(four_byte_option_usize, usize);
 four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);

 /// Defines an operation which may invalidate the `execution_status` of some nodes.
+#[derive(Clone)]
 pub enum InvalidationOperation {
     /// Invalidate only `block_root` and its descendants. Don't invalidate any ancestors.
     InvalidateOne { block_root: Hash256 },
diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml
index a0ce237481b..c7ed4b308df 100644
--- a/consensus/state_processing/Cargo.toml
+++ b/consensus/state_processing/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2021"
 [dev-dependencies]
 env_logger = "0.9.0"
 beacon_chain = { path = "../../beacon_node/beacon_chain" }
+tokio = { version = "1.14.0", features = ["rt-multi-thread"] }

 [dependencies]
 bls = { path = "../../crypto/bls" }
diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs
index b75a79c72e8..2daefdacadb 100644
--- a/consensus/state_processing/src/per_block_processing/tests.rs
+++ b/consensus/state_processing/src/per_block_processing/tests.rs
@@ -27,7 +27,7 @@ lazy_static!
{ static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( epoch_offset: u64, num_validators: usize, ) -> BeaconChainHarness> { @@ -41,27 +41,31 @@ fn get_harness( .build(); let state = harness.get_current_state(); if last_slot_of_epoch > Slot::new(0) { - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..last_slot_of_epoch.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..last_slot_of_epoch.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..num_validators).collect::>().as_slice(), + ) + .await; } harness } -#[test] -fn valid_block_ok() { +#[tokio::test] +async fn valid_block_ok() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let result = per_block_processing( &mut state, @@ -75,15 +79,15 @@ fn valid_block_ok() { assert!(result.is_ok()); } -#[test] -fn invalid_block_header_state_slot() { +#[tokio::test] +async fn invalid_block_header_state_slot() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot); + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); @@ -104,15 +108,17 @@ fn invalid_block_header_state_slot() { ); } -#[test] -fn invalid_parent_block_root() { +#[tokio::test] +async fn invalid_parent_block_root() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (mut block, signature) = signed_block.deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); @@ -136,14 +142,16 @@ fn invalid_parent_block_root() { ); } -#[test] -fn invalid_block_signature() { +#[tokio::test] +async fn invalid_block_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (block, _) = signed_block.deconstruct(); let result = per_block_processing( @@ -164,17 +172,19 @@ fn invalid_block_signature() { ); } -#[test] -fn invalid_randao_reveal_signature() { +#[tokio::test] 
+async fn invalid_randao_reveal_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_with_modifier(state, slot + 1, |block| { - *block.body_mut().randao_reveal_mut() = Signature::empty(); - }); + let (signed_block, mut state) = harness + .make_block_with_modifier(state, slot + 1, |block| { + *block.body_mut().randao_reveal_mut() = Signature::empty(); + }) + .await; let result = per_block_processing( &mut state, @@ -189,16 +199,22 @@ fn invalid_randao_reveal_signature() { assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid)); } -#[test] -fn valid_4_deposits() { +#[tokio::test] +async fn valid_4_deposits() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -207,16 +223,22 @@ fn valid_4_deposits() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_deposit_count_too_big() { +#[tokio::test] +async fn invalid_deposit_deposit_count_too_big() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let big_deposit_count = NUM_DEPOSITS + 1; @@ -233,16 +255,22 @@ fn invalid_deposit_deposit_count_too_big() { ); } -#[test] -fn invalid_deposit_count_too_small() { +#[tokio::test] +async fn invalid_deposit_count_too_small() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let small_deposit_count = NUM_DEPOSITS - 1; @@ -259,16 +287,22 @@ fn invalid_deposit_count_too_small() { ); } -#[test] -fn invalid_deposit_bad_merkle_proof() { +#[tokio::test] +async fn invalid_deposit_bad_merkle_proof() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, 
VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let bad_index = state.eth1_deposit_index() as usize; @@ -287,17 +321,23 @@ fn invalid_deposit_bad_merkle_proof() { ); } -#[test] -fn invalid_deposit_wrong_sig() { +#[tokio::test] +async fn invalid_deposit_wrong_sig() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -305,17 +345,23 @@ fn invalid_deposit_wrong_sig() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_invalid_pub_key() { +#[tokio::test] +async fn invalid_deposit_invalid_pub_key() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -324,13 +370,19 @@ fn invalid_deposit_invalid_pub_key() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attestation_no_committee_for_index() { +#[tokio::test] +async fn invalid_attestation_no_committee_for_index() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .index += 1; @@ -352,13 +404,19 @@ fn invalid_attestation_no_committee_for_index() { ); } -#[test] -fn invalid_attestation_wrong_justified_checkpoint() { +#[tokio::test] +async fn invalid_attestation_wrong_justified_checkpoint() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + 
.head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let old_justified_checkpoint = head_block.body().attestations()[0].data.source; let mut new_justified_checkpoint = old_justified_checkpoint; new_justified_checkpoint.epoch += Epoch::new(1); @@ -389,13 +447,19 @@ fn invalid_attestation_wrong_justified_checkpoint() { ); } -#[test] -fn invalid_attestation_bad_aggregation_bitfield_len() { +#[tokio::test] +async fn invalid_attestation_bad_aggregation_bitfield_len() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = Bitfield::with_capacity(spec.target_committee_size).unwrap(); @@ -416,13 +480,19 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { ); } -#[test] -fn invalid_attestation_bad_signature() { +#[tokio::test] +async fn invalid_attestation_bad_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, 97); // minimal number of required validators for this test + let harness = get_harness::(EPOCH_OFFSET, 97).await; // minimal number of required validators for this test let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); let result = process_operations::process_attestations( @@ -444,13 +514,19 @@ fn invalid_attestation_bad_signature() { ); } -#[test] -fn invalid_attestation_included_too_early() { +#[tokio::test] +async fn invalid_attestation_included_too_early() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot + Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -479,14 +555,20 @@ fn invalid_attestation_included_too_early() { ); } -#[test] -fn invalid_attestation_included_too_late() { +#[tokio::test] +async fn invalid_attestation_included_too_late() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot - Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -512,14 +594,20 @@ fn 
invalid_attestation_included_too_late() { ); } -#[test] -fn invalid_attestation_target_epoch_slot_mismatch() { +#[tokio::test] +async fn invalid_attestation_target_epoch_slot_mismatch() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .target @@ -544,10 +632,10 @@ fn invalid_attestation_target_epoch_slot_mismatch() { ); } -#[test] -fn valid_insert_attester_slashing() { +#[tokio::test] +async fn valid_insert_attester_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let attester_slashing = harness.make_attester_slashing(vec![1, 2]); @@ -563,10 +651,10 @@ fn valid_insert_attester_slashing() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attester_slashing_not_slashable() { +#[tokio::test] +async fn invalid_attester_slashing_not_slashable() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); @@ -589,10 +677,10 @@ fn invalid_attester_slashing_not_slashable() { ); } -#[test] -fn invalid_attester_slashing_1_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_1_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); @@ -618,10 +706,10 @@ fn invalid_attester_slashing_1_invalid() { ); } -#[test] -fn invalid_attester_slashing_2_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_2_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); @@ -647,10 +735,10 @@ fn invalid_attester_slashing_2_invalid() { ); } -#[test] -fn valid_insert_proposer_slashing() { +#[tokio::test] +async fn valid_insert_proposer_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); let result = process_operations::process_proposer_slashings( @@ -663,10 +751,10 @@ fn valid_insert_proposer_slashing() { assert!(result.is_ok()); } -#[test] -fn invalid_proposer_slashing_proposals_identical() { +#[tokio::test] +async fn invalid_proposer_slashing_proposals_identical() { let spec = MainnetEthSpec::default_spec(); - let 
harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); @@ -689,10 +777,10 @@ fn invalid_proposer_slashing_proposals_identical() { ); } -#[test] -fn invalid_proposer_slashing_proposer_unknown() { +#[tokio::test] +async fn invalid_proposer_slashing_proposer_unknown() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.proposer_index = 3_141_592; @@ -716,10 +804,10 @@ fn invalid_proposer_slashing_proposer_unknown() { ); } -#[test] -fn invalid_proposer_slashing_duplicate_slashing() { +#[tokio::test] +async fn invalid_proposer_slashing_duplicate_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); @@ -747,10 +835,10 @@ fn invalid_proposer_slashing_duplicate_slashing() { ); } -#[test] -fn invalid_bad_proposal_1_signature() { +#[tokio::test] +async fn invalid_bad_proposal_1_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -771,10 +859,10 @@ fn invalid_bad_proposal_1_signature() { ); } -#[test] -fn invalid_bad_proposal_2_signature() { +#[tokio::test] +async fn invalid_bad_proposal_2_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -795,10 +883,10 @@ fn invalid_bad_proposal_2_signature() { ); } -#[test] -fn invalid_proposer_slashing_proposal_epoch_mismatch() { +#[tokio::test] +async fn invalid_proposer_slashing_proposal_epoch_mismatch() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 4379547bfe6..14bbfbc071d 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -6,8 +6,8 @@ use bls::Hash256; use env_logger::{Builder, Env}; use types::Slot; -#[test] -fn runs_without_error() { +#[tokio::test] +async fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -22,15 
+22,17 @@ fn runs_without_error() { (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..target_slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..8).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..target_slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..8).collect::>().as_slice(), + ) + .await; let mut new_head_state = harness.get_current_state(); process_epoch(&mut new_head_state, &spec).unwrap(); @@ -45,8 +47,8 @@ mod release_tests { use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; - #[test] - fn altair_state_on_base_fork() { + #[tokio::test] + async fn altair_state_on_base_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork happens at epoch 1. @@ -61,12 +63,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get an Altair block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get an Altair block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; @@ -103,8 +107,8 @@ mod release_tests { ); } - #[test] - fn base_state_on_altair_fork() { + #[tokio::test] + async fn base_state_on_altair_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork never happens. @@ -119,12 +123,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get a block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get a block at the very end of an epoch. 
+ (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs index cb9fc9390a3..e5b505bb91c 100644 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ b/consensus/tree_hash/examples/flamegraph_beacon_state.rs @@ -17,7 +17,7 @@ fn get_harness() -> BeaconChainHarness> { } fn build_state() -> BeaconState { - let state = get_harness::().chain.head_beacon_state().unwrap(); + let state = get_harness::().chain.head_beacon_state_cloned(); assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 96018230f0d..c3e454fdfcc 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -53,6 +53,7 @@ criterion = "0.3.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } state_processing = { path = "../state_processing" } +tokio = "1.14.0" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 6eb12ddf05e..2d7e68a5c4a 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -38,7 +38,7 @@ use tree_hash_derive::TreeHash; derive(Debug, PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent") ), - map_ref_into(BeaconBlockBodyRef), + map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] @@ -541,6 +541,50 @@ impl_from!(BeaconBlockBase, >, >, |body: impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); +// We can clone blocks with payloads to blocks without payloads, without cloning the payload. +macro_rules! impl_clone_as_blinded { + ($ty_name:ident, <$($from_params:ty),*>, <$($to_params:ty),*>) => { + impl $ty_name<$($from_params),*> + { + pub fn clone_as_blinded(&self) -> $ty_name<$($to_params),*> { + let $ty_name { + slot, + proposer_index, + parent_root, + state_root, + body, + } = self; + + $ty_name { + slot: *slot, + proposer_index: *proposer_index, + parent_root: *parent_root, + state_root: *state_root, + body: body.clone_as_blinded(), + } + } + } + } +} + +impl_clone_as_blinded!(BeaconBlockBase, >, >); +impl_clone_as_blinded!(BeaconBlockAltair, >, >); +impl_clone_as_blinded!(BeaconBlockMerge, >, >); + +// A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the +// execution payload. +impl<'a, E: EthSpec> From>> + for BeaconBlock> +{ + fn from( + full_block: BeaconBlockRef<'a, E, FullPayload>, + ) -> BeaconBlock> { + map_beacon_block_ref_into_beacon_block!(&'a _, full_block, |inner, cons| { + cons(inner.clone_as_blinded()) + }) + } +} + impl From>> for ( BeaconBlock>, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 34761ea9a7f..381a9bd43e3 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -251,6 +251,53 @@ impl From>> } } +// We can clone a full block into a blinded block, without cloning the payload. 
+impl BeaconBlockBodyBase> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { + let (block_body, _payload) = self.clone().into(); + block_body + } +} + +impl BeaconBlockBodyAltair> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyAltair> { + let (block_body, _payload) = self.clone().into(); + block_body + } +} + +impl BeaconBlockBodyMerge> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyMerge> { + let BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + } = self; + + BeaconBlockBodyMerge { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayload { + execution_payload_header: From::from(execution_payload), + }, + } + } +} + impl From>> for ( BeaconBlockBody>, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 48998e26d0d..db431138aac 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -34,32 +34,34 @@ fn default_values() { assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err()); } -fn new_state(validator_count: usize, slot: Slot) -> BeaconState { +async fn new_state(validator_count: usize, slot: Slot) -> BeaconState { let harness = get_harness(validator_count); let head_state = harness.get_current_state(); if slot > Slot::new(0) { - harness.add_attested_blocks_at_slots( - head_state, - Hash256::zero(), - (1..slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + head_state, + Hash256::zero(), + (1..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness.get_current_state() } -#[test] +#[tokio::test] #[should_panic] -fn fails_without_validators() { - new_state::(0, Slot::new(0)); +async fn fails_without_validators() { + new_state::(0, Slot::new(0)).await; } -#[test] -fn initializes_with_the_right_epoch() { - let state = new_state::(16, Slot::new(0)); +#[tokio::test] +async fn initializes_with_the_right_epoch() { + let state = new_state::(16, Slot::new(0)).await; let spec = &MinimalEthSpec::default_spec(); let cache = CommitteeCache::default(); @@ -75,13 +77,13 @@ fn initializes_with_the_right_epoch() { assert!(cache.is_initialized_at(state.next_epoch().unwrap())); } -#[test] -fn shuffles_for_the_right_epoch() { +#[tokio::test] +async fn shuffles_for_the_right_epoch() { let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(6); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - let mut state = new_state::(num_validators, slot); + let mut state = new_state::(num_validators, slot).await; let spec = &MinimalEthSpec::default_spec(); let distinct_hashes: Vec = (0..MinimalEthSpec::epochs_per_historical_vector()) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index b88b49e1a39..d65d0a9e6ce 100644 --- 
a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -25,7 +25,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( validator_count: usize, slot: Slot, ) -> BeaconChainHarness> { @@ -41,24 +41,26 @@ fn get_harness( .map(Slot::new) .collect::>(); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - slots.as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + slots.as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness } -fn build_state(validator_count: usize) -> BeaconState { +async fn build_state(validator_count: usize) -> BeaconState { get_harness(validator_count, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } -fn test_beacon_proposer_index() { +async fn test_beacon_proposer_index() { let spec = T::default_spec(); // Get the i'th candidate proposer for the given state and slot @@ -85,20 +87,20 @@ fn test_beacon_proposer_index() { // Test where we have one validator per slot. // 0th candidate should be chosen every time. - let state = build_state(T::slots_per_epoch() as usize); + let state = build_state(T::slots_per_epoch() as usize).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test where we have two validators per slot. // 0th candidate should be chosen every time. - let state = build_state((T::slots_per_epoch() as usize).mul(2)); + let state = build_state((T::slots_per_epoch() as usize).mul(2)).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test with two validators per slot, first validator has zero balance. 
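+    // Zeroing the effective balance makes the 0th candidate fail the balance-weighted
+    // proposer sampling check with this fixture, so the 1st candidate is selected
+    // (hence the expected index below changes to 1).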
- let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)); + let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); state.validators_mut()[slot0_candidate0].effective_balance = 0; test(&state, Slot::new(0), 1); @@ -107,9 +109,9 @@ fn test_beacon_proposer_index() { } } -#[test] -fn beacon_proposer_index() { - test_beacon_proposer_index::(); +#[tokio::test] +async fn beacon_proposer_index() { + test_beacon_proposer_index::().await; } /// Test that @@ -144,11 +146,11 @@ fn test_cache_initialization( ); } -#[test] -fn cache_initialization() { +#[tokio::test] +async fn cache_initialization() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::(16); + let mut state = build_state::(16).await; *state.slot_mut() = (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch()); @@ -211,11 +213,11 @@ fn test_clone_config(base_state: &BeaconState, clone_config: Clon } } -#[test] -fn clone_config() { +#[tokio::test] +async fn clone_config() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::(16); + let mut state = build_state::(16).await; state.build_all_caches(&spec).unwrap(); state @@ -314,7 +316,7 @@ mod committees { assert!(expected_indices_iter.next().is_none()); } - fn committee_consistency_test( + async fn committee_consistency_test( validator_count: usize, state_epoch: Epoch, cache_epoch: RelativeEpoch, @@ -322,7 +324,7 @@ mod committees { let spec = &T::default_spec(); let slot = state_epoch.start_slot(T::slots_per_epoch()); - let harness = get_harness::(validator_count, slot); + let harness = get_harness::(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) @@ -350,7 +352,7 @@ mod committees { ); } - fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { + async fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { let spec = T::default_spec(); let validator_count = spec @@ -359,13 +361,15 @@ mod committees { .mul(spec.target_committee_size) .add(1); - committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch); + committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch) + .await; committee_consistency_test::( validator_count as usize, T::genesis_epoch() + 4, cached_epoch, - ); + ) + .await; committee_consistency_test::( validator_count as usize, @@ -374,38 +378,39 @@ mod committees { .mul(T::slots_per_epoch()) .mul(4), cached_epoch, - ); + ) + .await; } - #[test] - fn current_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Current); + #[tokio::test] + async fn current_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Current).await; } - #[test] - fn previous_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Previous); + #[tokio::test] + async fn previous_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Previous).await; } - #[test] - fn next_epoch_committee_consistency() { - committee_consistency_test_suite::(RelativeEpoch::Next); + #[tokio::test] + async fn next_epoch_committee_consistency() { + committee_consistency_test_suite::(RelativeEpoch::Next).await; } } mod get_outstanding_deposit_len { use super::*; - fn state() -> BeaconState { + async fn state() -> BeaconState { get_harness(16, Slot::new(0)) + .await 
.chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } - #[test] - fn returns_ok() { - let mut state = state(); + #[tokio::test] + async fn returns_ok() { + let mut state = state().await; assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); state.eth1_data_mut().deposit_count = 17; @@ -413,9 +418,9 @@ mod get_outstanding_deposit_len { assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); } - #[test] - fn returns_err_if_the_state_is_invalid() { - let mut state = state(); + #[tokio::test] + async fn returns_err_if_the_state_is_invalid() { + let mut state = state().await; // The state is invalid, deposit count is lower than deposit index. state.eth1_data_mut().deposit_count = 16; *state.eth1_deposit_index_mut() = 17; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index d736f0be193..a21eeb63c27 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -28,6 +28,8 @@ pub trait ExecPayload: + Hash + TryFrom> + From> + + Send + + 'static { fn block_type() -> BlockType; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 54880706882..5c40c4685c3 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -346,6 +346,14 @@ impl From> for SignedBlindedBeaconBlock { } } +// We can blind borrowed blocks with payloads by converting the payload into a header (without +// cloning the payload contents). +impl SignedBeaconBlock { + pub fn clone_as_blinded(&self) -> SignedBlindedBeaconBlock { + SignedBeaconBlock::from_block(self.message().into(), self.signature().clone()) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 6717bb0f467..50295df4b0e 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -222,7 +222,7 @@ pub fn migrate_db( runtime_context: &RuntimeContext, log: Logger, ) -> Result<(), Error> { - let spec = runtime_context.eth2_config.spec.clone(); + let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); @@ -236,7 +236,7 @@ pub fn migrate_db( Ok(()) }, client_config.store.clone(), - spec, + spec.clone(), log.clone(), )?; @@ -253,6 +253,7 @@ pub fn migrate_db( from, to, log, + spec, ) } diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 88feff0bbc4..091a95dc4cb 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -216,14 +216,7 @@ impl SlasherService { }; // Add to local op pool. - if let Err(e) = beacon_chain.import_attester_slashing(verified_slashing) { - error!( - log, - "Beacon chain refused attester slashing"; - "error" => ?e, - "slashing" => ?slashing, - ); - } + beacon_chain.import_attester_slashing(verified_slashing); // Publish to the network if broadcast is enabled. 
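+        // (No error branch is needed above: `import_attester_slashing` no longer
+        // returns a `Result`; the verified slashing goes straight into the op pool.)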
if slasher.config().broadcast { diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ac9ca8993cc..64f4aa75389 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -81,11 +81,23 @@ pub struct Cases { } impl Cases { - pub fn test_results(&self, fork_name: ForkName) -> Vec { - self.test_cases - .into_par_iter() - .enumerate() - .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) - .collect() + pub fn test_results(&self, fork_name: ForkName, use_rayon: bool) -> Vec { + if use_rayon { + self.test_cases + .into_par_iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } else { + self.test_cases + .iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 92c28aeb045..4f9f4dacad3 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,15 +7,17 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, HeadInfo, + BeaconChainTypes, CachedHead, }; use serde_derive::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; +use std::future::Future; +use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, + Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -287,19 +289,20 @@ impl Tester { Ok(self.spec.genesis_slot + slots_since_genesis) } - fn find_head(&self) -> Result { + fn block_on_dangerous(&self, future: F) -> Result { self.harness .chain - .fork_choice() - .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; - self.harness - .chain - .head_info() - .map_err(|e| Error::InternalError(format!("failed to read head with {:?}", e))) + .task_executor + .clone() + .block_on_dangerous(future, "ef_tests_block_on") + .ok_or_else(|| Error::InternalError("runtime shutdown".into())) } - fn genesis_epoch(&self) -> Epoch { - self.spec.genesis_slot.epoch(E::slots_per_epoch()) + fn find_head(&self) -> Result, Error> { + let chain = self.harness.chain.clone(); + self.block_on_dangerous(chain.recompute_head_at_current_slot())? + .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; + Ok(self.harness.chain.canonical_head.cached_head()) } pub fn set_tick(&self, tick: u64) { @@ -314,15 +317,16 @@ impl Tester { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .update_time(slot) .unwrap(); } pub fn process_block(&self, block: SignedBeaconBlock, valid: bool) -> Result<(), Error> { - let result = self.harness.chain.process_block(block.clone()); let block_root = block.canonical_root(); + let block = Arc::new(block); + let result = self.block_on_dangerous(self.harness.chain.process_block(block.clone()))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. 
result: {:?}", @@ -367,16 +371,20 @@ impl Tester { .seconds_from_current_slot_start(self.spec.seconds_per_slot) .unwrap(); - let (block, _) = block.deconstruct(); - let result = self.harness.chain.fork_choice.write().on_block( - self.harness.chain.slot().unwrap(), - &block, - block_root, - block_delay, - &state, - PayloadVerificationStatus::Irrelevant, - &self.harness.chain.spec, - ); + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_block( + self.harness.chain.slot().unwrap(), + block.message(), + block_root, + block_delay, + &state, + PayloadVerificationStatus::Irrelevant, + &self.harness.chain.spec, + ); if result.is_ok() { return Err(Error::DidntFail(format!( @@ -424,10 +432,11 @@ impl Tester { } pub fn check_head(&self, expected_head: Head) -> Result<(), Error> { - let chain_head = self.find_head().map(|head| Head { - slot: head.slot, - root: head.block_root, - })?; + let head = self.find_head()?; + let chain_head = Head { + slot: head.head_slot(), + root: head.head_block_root(), + }; check_equal("head", chain_head, expected_head) } @@ -446,15 +455,15 @@ impl Tester { } pub fn check_justified_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); - - assert_checkpoints_eq( - "justified_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); + + assert_checkpoints_eq("justified_checkpoint", head_checkpoint, fc_checkpoint); check_equal("justified_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -463,15 +472,15 @@ impl Tester { &self, expected_checkpoint_root: Hash256, ) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); - assert_checkpoints_eq( - "justified_checkpoint_root", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("justified_checkpoint_root", head_checkpoint, fc_checkpoint); check_equal( "justified_checkpoint_root", @@ -481,15 +490,15 @@ impl Tester { } pub fn check_finalized_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.finalized_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().finalized_checkpoint(); - - assert_checkpoints_eq( - "finalized_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + let head_checkpoint = self.find_head()?.finalized_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint(); + + assert_checkpoints_eq("finalized_checkpoint", head_checkpoint, fc_checkpoint); check_equal("finalized_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -501,8 +510,8 @@ impl Tester { let best_justified_checkpoint = self .harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .best_justified_checkpoint(); check_equal( "best_justified_checkpoint", @@ -515,7 +524,12 @@ impl Tester { &self, 
expected_proposer_boost_root: Hash256, ) -> Result<(), Error> { - let proposer_boost_root = self.harness.chain.fork_choice.read().proposer_boost_root(); + let proposer_boost_root = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proposer_boost_root(); check_equal( "proposer_boost_root", proposer_boost_root, @@ -530,20 +544,8 @@ impl Tester { /// This function is necessary due to a quirk documented in this issue: /// /// https://github.com/ethereum/consensus-specs/issues/2566 -fn assert_checkpoints_eq(name: &str, genesis_epoch: Epoch, head: Checkpoint, fc: Checkpoint) { - if fc.epoch == genesis_epoch { - assert_eq!( - head, - Checkpoint { - epoch: genesis_epoch, - root: Hash256::zero() - }, - "{} (genesis)", - name - ) - } else { - assert_eq!(head, fc, "{} (non-genesis)", name) - } +fn assert_checkpoints_eq(name: &str, head: Checkpoint, fc: Checkpoint) { + assert_eq!(head, fc, "{}", name) } /// Convenience function to create `Error` messages. diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index be6c495aaed..25299bf5775 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -30,6 +30,10 @@ pub trait Handler { } } + fn use_rayon() -> bool { + true + } + fn run_for_fork(&self, fork_name: ForkName) { let fork_name_str = fork_name.to_string(); @@ -59,7 +63,7 @@ pub trait Handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name); + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); let name = format!( "{}/{}/{}", @@ -460,6 +464,11 @@ impl Handler for ForkChoiceHandler { self.handler_name.clone() } + fn use_rayon() -> bool { + // The fork choice tests use `block_on` which can cause panics with rayon. + false + } + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Merge block tests are only enabled for Bellatrix or later. 
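+        // (`on_merge_block` cases exercise the merge transition block rules, which
+        // only apply once execution payloads exist.)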
if self.handler_name == "on_merge_block" diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index a5bab4ed781..5b23af4fa15 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -98,10 +98,9 @@ impl TestRig { } pub fn perform_tests_blocking(&self) { - self.ee_a - .execution_layer - .block_on_generic(|_| async { self.perform_tests().await }) - .unwrap() + self.runtime + .handle() + .block_on(async { self.perform_tests().await }); } pub async fn wait_until_synced(&self) { diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 4e93db3b326..6da9f2f4a6f 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -12,3 +12,4 @@ types = { path = "../../consensus/types" } eth2_ssz = "0.4.1" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 75f82b3132f..3e4bb7bf3f9 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -37,11 +37,12 @@ impl Default for ExitTest { } impl ExitTest { - fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { + async fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { let harness = get_harness::( self.state_epoch.start_slot(E::slots_per_epoch()), VALIDATOR_COUNT, - ); + ) + .await; let mut state = harness.get_current_state(); (self.state_modifier)(&mut state); @@ -49,11 +50,12 @@ impl ExitTest { let validator_index = self.validator_index; let exit_epoch = self.exit_epoch; - let (signed_block, state) = - harness.make_block_with_modifier(state.clone(), state.slot() + 1, |block| { + let (signed_block, state) = harness + .make_block_with_modifier(state.clone(), state.slot() + 1, |block| { harness.add_voluntary_exit(block, validator_index, exit_epoch); block_modifier(&harness, block); - }); + }) + .await; (signed_block, state) } @@ -72,12 +74,12 @@ impl ExitTest { } #[cfg(all(test, not(debug_assertions)))] - fn run(self) -> BeaconState { + async fn run(self) -> BeaconState { let spec = &E::default_spec(); let expected = self.expected.clone(); assert_eq!(STATE_EPOCH, spec.shard_committee_period); - let (block, mut state) = self.block_and_pre_state(); + let (block, mut state) = self.block_and_pre_state().await; let result = Self::process(&block, &mut state); @@ -86,8 +88,8 @@ impl ExitTest { state } - fn test_vector(self, title: String) -> TestVector { - let (block, pre_state) = self.block_and_pre_state(); + async fn test_vector(self, title: String) -> TestVector { + let (block, pre_state) = self.block_and_pre_state().await; let mut post_state = pre_state.clone(); let (post_state, error) = match Self::process(&block, &mut post_state) { Ok(_) => (Some(post_state), None), @@ -334,14 +336,14 @@ mod custom_tests { ); } - #[test] - fn valid() { - let state = ExitTest::default().run(); + #[tokio::test] + async fn valid() { + let state = ExitTest::default().run().await; assert_exited(&state, VALIDATOR_INDEX as usize); } - #[test] - fn valid_three() { + #[tokio::test] + async fn valid_three() { let state = ExitTest { block_modifier: Box::new(|harness, block| { harness.add_voluntary_exit(block, 1, STATE_EPOCH); @@ -349,7 +351,8 @@ mod custom_tests { }), 
..ExitTest::default() } - .run(); + .run() + .await; for i in &[VALIDATOR_INDEX, 1, 2] { assert_exited(&state, *i as usize); diff --git a/testing/state_transition_vectors/src/macros.rs b/testing/state_transition_vectors/src/macros.rs index 81f81718525..5dafbf549a0 100644 --- a/testing/state_transition_vectors/src/macros.rs +++ b/testing/state_transition_vectors/src/macros.rs @@ -4,11 +4,11 @@ /// - `mod tests`: runs all the test vectors locally. macro_rules! vectors_and_tests { ($($name: ident, $test: expr),*) => { - pub fn vectors() -> Vec { + pub async fn vectors() -> Vec { let mut vec = vec![]; $( - vec.push($test.test_vector(stringify!($name).into())); + vec.push($test.test_vector(stringify!($name).into()).await); )* vec @@ -18,9 +18,9 @@ macro_rules! vectors_and_tests { mod tests { use super::*; $( - #[test] - fn $name() { - $test.run(); + #[tokio::test] + async fn $name() { + $test.run().await; } )* } diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index d66842e5a10..3e7c37af543 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -25,8 +25,9 @@ pub const BASE_VECTOR_DIR: &str = "vectors"; pub const SLOT_OFFSET: u64 = 1; /// Writes all known test vectors to `CARGO_MANIFEST_DIR/vectors`. -fn main() { - match write_all_vectors() { +#[tokio::main] +async fn main() { + match write_all_vectors().await { Ok(()) => exit(0), Err(e) => { eprintln!("Error: {}", e); @@ -49,7 +50,7 @@ lazy_static! { static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness( +async fn get_harness( slot: Slot, validator_count: usize, ) -> BeaconChainHarness> { @@ -61,23 +62,25 @@ fn get_harness( let skip_to_slot = slot - SLOT_OFFSET; if skip_to_slot > Slot::new(0) { let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (skip_to_slot.as_u64()..slot.as_u64()) - .map(Slot::new) - .collect::>() - .as_slice(), - (0..validator_count).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (skip_to_slot.as_u64()..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ) + .await; } harness } /// Writes all vectors to file. -fn write_all_vectors() -> Result<(), String> { - write_vectors_to_file("exit", &exit::vectors()) +async fn write_all_vectors() -> Result<(), String> { + write_vectors_to_file("exit", &exit::vectors().await) } /// Writes a list of `vectors` to the `title` dir. From 66bb5c716ca0f419cd29a81fff9830c869a0224a Mon Sep 17 00:00:00 2001 From: Divma Date: Sun, 3 Jul 2022 05:36:51 +0000 Subject: [PATCH 11/15] Use latest tags for nethermind and geth in the execution engine integration test (#3303) ## Issue Addressed Currently the execution-engine-integration test uses latest master for nethermind and geth, and right now the test fails using the latest unreleased commits. ## Proposed Changes Fix the nethermind and geth revisions the test uses to the latest tag in each repo. This way we are not continuously testing unreleased code, which might even get reverted, and reduce the failures only to releases in each one. Also improve error handling of the commands used to manage the git repos. 
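For reference, the tag resolution now works roughly as sketched below (the branch and tag names are illustrative):

```bash
# Make sure tags are present even if the repo was cloned in an earlier run,
# then pin the checkout to the most recent tag on the tracked branch.
git fetch --tags
tag=$(git describe "origin/master" --abbrev=0 --tags)
git checkout "$tag"
```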
## Additional Info na Co-authored-by: Michael Sproul --- .../src/build_utils.rs | 118 +++++++++++++----- .../execution_engine_integration/src/geth.rs | 19 ++- .../src/nethermind.rs | 21 ++-- 3 files changed, 102 insertions(+), 56 deletions(-) diff --git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 4d4a7bf1ce1..966a3bfb436 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -15,51 +15,101 @@ pub fn prepare_dir() -> PathBuf { execution_clients_dir } -pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> bool { - Command::new("git") - .arg("clone") - .arg(repo_url) - .arg("--recursive") - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| panic!("failed to clone repo at {}", repo_url)) - .status - .success() +pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("clone") + .arg(repo_url) + .arg("--recursive") + .current_dir(repo_dir) + .output() + .map_err(|_| format!("failed to clone repo at {repo_url}"))?, + |_| {}, + ) } -pub fn checkout_branch(repo_dir: &Path, branch_name: &str) -> bool { - Command::new("git") - .arg("checkout") - .arg(branch_name) - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| { - panic!( - "failed to checkout branch at {:?}/{}", - repo_dir, branch_name, - ) - }) - .status - .success() +pub fn checkout(repo_dir: &Path, revision_or_branch: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("checkout") + .arg(revision_or_branch) + .current_dir(repo_dir) + .output() + .map_err(|_| { + format!( + "failed to checkout branch or revision at {repo_dir:?}/{revision_or_branch}", + ) + })?, + |_| {}, + ) } -pub fn update_branch(repo_dir: &Path, branch_name: &str) -> bool { - Command::new("git") - .arg("pull") - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| panic!("failed to update branch at {:?}/{}", repo_dir, branch_name)) - .status - .success() +/// Gets the last annotated tag of the given repo. +pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result { + // If the directory was already present it is possible we don't have the most recent tags. + // Fetch them + output_to_result( + Command::new("git") + .arg("fetch") + .arg("--tags") + .current_dir(repo_dir) + .output() + .map_err(|e| format!("Failed to fetch tags for {repo_dir:?}: Err: {e}"))?, + |_| {}, + )?; + output_to_result( + Command::new("git") + .arg("describe") + .arg(format!("origin/{branch_name}")) + .arg("--abbrev=0") + .arg("--tags") + .current_dir(repo_dir) + .output() + .map_err(|e| format!("Failed to get latest tag for {repo_dir:?}: Err: {e}"))?, + |stdout| { + let tag = String::from_utf8_lossy(&stdout); + tag.trim().to_string() + }, + ) } -pub fn check_command_output(output: Output, failure_msg: &'static str) { +#[allow(dead_code)] +pub fn update_branch(repo_dir: &Path, branch_name: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("pull") + .current_dir(repo_dir) + .output() + .map_err(|_| format!("failed to update branch at {:?}/{}", repo_dir, branch_name))?, + |_| {}, + ) +} + +/// Checks the status of the [`std::process::Output`] and applies `f` to `stdout` if the process +/// succeedded. If not, builds a readable error containing stdout and stderr. 
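+///
+/// For example, a caller that only cares about success can pass `|_| {}` as `f`, while
+/// `get_latest_release` above extracts a trimmed tag string from stdout with
+/// `|stdout| String::from_utf8_lossy(&stdout).trim().to_string()`.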
+fn output_to_result(output: Output, f: OnSuccessFn) -> Result +where + OnSuccessFn: Fn(Vec) -> T, +{ + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + Err(format!("stderr: {stderr}\nstdout: {stdout}")) + } else { + Ok(f(output.stdout)) + } +} + +pub fn check_command_output(output: Output, failure_msg: F) +where + F: Fn() -> String, +{ if !output.status.success() { if !SUPPRESS_LOGS { dbg!(String::from_utf8_lossy(&output.stdout)); dbg!(String::from_utf8_lossy(&output.stderr)); } - panic!("{}", failure_msg); + panic!("{}", failure_msg()); } } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 7a6a3803e66..129faea9076 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -23,20 +23,17 @@ pub fn build(execution_clients_dir: &Path) { if !repo_dir.exists() { // Clone the repo - assert!(build_utils::clone_repo( - execution_clients_dir, - GETH_REPO_URL - )); + build_utils::clone_repo(execution_clients_dir, GETH_REPO_URL).unwrap(); } - // Checkout the correct branch - assert!(build_utils::checkout_branch(&repo_dir, GETH_BRANCH)); - - // Update the branch - assert!(build_utils::update_branch(&repo_dir, GETH_BRANCH)); + // Get the latest tag on the branch + let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth - build_utils::check_command_output(build_result(&repo_dir), "make failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("geth make failed using release {last_release}") + }); } /* @@ -75,7 +72,7 @@ impl GenericExecutionEngine for GethEngine { .output() .expect("failed to init geth"); - build_utils::check_command_output(output, "geth init failed"); + build_utils::check_command_output(output, || "geth init failed".into()); datadir } diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index be638fe0424..df345f36be2 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -25,24 +25,23 @@ pub fn build(execution_clients_dir: &Path) { if !repo_dir.exists() { // Clone the repo - assert!(build_utils::clone_repo( - execution_clients_dir, - NETHERMIND_REPO_URL - )); + build_utils::clone_repo(execution_clients_dir, NETHERMIND_REPO_URL).unwrap() } - // Checkout the correct branch - assert!(build_utils::checkout_branch(&repo_dir, NETHERMIND_BRANCH)); - - // Update the branch - assert!(build_utils::update_branch(&repo_dir, NETHERMIND_BRANCH)); + // Get the latest tag + let last_release = build_utils::get_latest_release(&repo_dir, NETHERMIND_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build nethermind - build_utils::check_command_output(build_result(&repo_dir), "dotnet build failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("nethermind build failed using release {last_release}") + }); // Build nethermind a second time to enable Merge-related features. // Not sure why this is necessary. 
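+    // (The failure message is now supplied as a closure so that the string, which
+    // interpolates the release tag, is only formatted when the build actually fails.)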
- build_utils::check_command_output(build_result(&repo_dir), "dotnet build failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("nethermind build failed using release {last_release}") + }); } /* From 61ed5f0ec626ea4fda52b98a58c94970eda96e89 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 4 Jul 2022 02:56:11 +0000 Subject: [PATCH 12/15] Optimize historic committee calculation for the HTTP API (#3272) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/3270 ## Proposed Changes Optimize the calculation of historic beacon committees in the HTTP API. This is achieved by allowing committee caches to be constructed for historic epochs, and constructing these committee caches on the fly in the API. This is much faster than reconstructing the state at the requested epoch, which usually takes upwards of 20s, and sometimes minutes with SPRP=8192. The depth of the `randao_mixes` array allows us to look back 64K epochs/0.8 years from a single state, which is pretty awesome! We always use the `state_id` provided by the caller, but will return a nice 400 error if the epoch requested is out of range for the state requested, e.g. ```bash # Prater curl "http://localhost:5052/eth/v1/beacon/states/3170304/committees?epoch=33538" ``` ```json {"code":400,"message":"BAD_REQUEST: epoch out of bounds, try state at slot 1081344","stacktraces":[]} ``` Queries will be fastest when aligned to `slot % SPRP == 0`, so the hint suggests a slot that is 0 mod 8192. --- beacon_node/http_api/src/lib.rs | 47 +++++++++++------- consensus/types/src/beacon_state.rs | 7 +++ .../types/src/beacon_state/committee_cache.rs | 14 +++++- .../src/beacon_state/committee_cache/tests.rs | 48 +++++++++++++++---- 4 files changed, 88 insertions(+), 28 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ff4d46efcb4..606dfb64dc4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -657,26 +657,41 @@ pub fn serve( .and(warp::path::end()) .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { - // the api spec says if the epoch is not present then the epoch of the state should be used - let query_state_id = query.epoch.map_or(state_id, |epoch| { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - }); - blocking_json_task(move || { - query_state_id.map_state(&chain, |state| { - let epoch = state.slot().epoch(T::EthSpec::slots_per_epoch()); + state_id.map_state(&chain, |state| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); - let committee_cache = if state - .committee_cache_is_initialized(RelativeEpoch::Current) + let committee_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) { - state - .committee_cache(RelativeEpoch::Current) - .map(Cow::Borrowed) - } else { - CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned) + Ok(relative_epoch) + if state.committee_cache_is_initialized(relative_epoch) => + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } + _ => CommitteeCache::initialized(state, epoch, &chain.spec) + .map(Cow::Owned), } - .map_err(BeaconChainError::BeaconStateError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(|e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = T::EthSpec::slots_per_historical_root() as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot(T::EthSpec::slots_per_epoch()) / max_sprp) + + 1) + * 
max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request(format!( + "epoch out of bounds, try state at slot {}", + first_subsequent_restore_point_slot, + )) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, too far in future".into(), + ) + } + } + _ => warp_utils::reject::beacon_chain_error(e.into()), + })?; // Use either the supplied slot or all slots in the epoch. let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 66656d35894..fca200312f1 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -963,6 +963,13 @@ impl BeaconState { } } + /// Return the minimum epoch for which `get_randao_mix` will return a non-error value. + pub fn min_randao_epoch(&self) -> Epoch { + self.current_epoch() + .saturating_add(1u64) + .saturating_sub(T::EpochsPerHistoricalVector::to_u64()) + } + /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. /// /// # Errors: diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 8a87cddac82..7a526acc583 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -38,8 +38,18 @@ impl CommitteeCache { epoch: Epoch, spec: &ChainSpec, ) -> Result { - RelativeEpoch::from_epoch(state.current_epoch(), epoch) - .map_err(|_| Error::EpochOutOfBounds)?; + // Check that the cache is being built for an in-range epoch. + // + // We allow caches to be constructed for historic epochs, per: + // + // https://github.com/sigp/lighthouse/issues/3270 + let reqd_randao_epoch = epoch + .saturating_sub(spec.min_seed_lookahead) + .saturating_sub(1u64); + + if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { + return Err(Error::EpochOutOfBounds); + } // May cause divide-by-zero errors. if T::slots_per_epoch() == 0 { diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index db431138aac..11cc6095da8 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -42,7 +42,7 @@ async fn new_state(validator_count: usize, slot: Slot) -> BeaconStat .add_attested_blocks_at_slots( head_state, Hash256::zero(), - (1..slot.as_u64()) + (1..=slot.as_u64()) .map(Slot::new) .collect::>() .as_slice(), @@ -86,6 +86,8 @@ async fn shuffles_for_the_right_epoch() { let mut state = new_state::(num_validators, slot).await; let spec = &MinimalEthSpec::default_spec(); + assert_eq!(state.current_epoch(), epoch); + let distinct_hashes: Vec = (0..MinimalEthSpec::epochs_per_historical_vector()) .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); @@ -124,15 +126,41 @@ async fn shuffles_for_the_right_epoch() { } }; - let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(current_seed)); - assert_shuffling_positions_accurate(&cache); + // We can initialize the committee cache at recent epochs in the past, and one epoch into the + // future. 
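+    // A cache for epoch `e` needs the RANDAO mix from epoch `e - MIN_SEED_LOOKAHEAD - 1`,
+    // so the usable range is bounded by the depth of `randao_mixes`
+    // (`EPOCHS_PER_HISTORICAL_VECTOR`), roughly 64K epochs of history on mainnet.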
+ for e in (0..=epoch.as_u64() + 1).map(Epoch::new) { + let seed = state.get_seed(e, Domain::BeaconAttester, spec).unwrap(); + let cache = CommitteeCache::initialized(&state, e, spec) + .unwrap_or_else(|_| panic!("failed at epoch {}", e)); + assert_eq!(cache.shuffling(), shuffling_with_seed(seed)); + assert_shuffling_positions_accurate(&cache); + } - let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(previous_seed)); - assert_shuffling_positions_accurate(&cache); + // We should *not* be able to build a committee cache for the epoch after the next epoch. + assert_eq!( + CommitteeCache::initialized(&state, epoch + 2, spec), + Err(BeaconStateError::EpochOutOfBounds) + ); +} - let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(next_seed)); - assert_shuffling_positions_accurate(&cache); +#[tokio::test] +async fn min_randao_epoch_correct() { + let num_validators = MinimalEthSpec::minimum_validator_count() * 2; + let current_epoch = Epoch::new(MinimalEthSpec::epochs_per_historical_vector() as u64 * 2); + + let mut state = new_state::( + num_validators, + Epoch::new(1).start_slot(MinimalEthSpec::slots_per_epoch()), + ) + .await; + + // Override the epoch so that there's some room to move. + *state.slot_mut() = current_epoch.start_slot(MinimalEthSpec::slots_per_epoch()); + assert_eq!(state.current_epoch(), current_epoch); + + // The min_randao_epoch should be the minimum epoch such that `get_randao_mix` returns `Ok`. + let min_randao_epoch = state.min_randao_epoch(); + state.get_randao_mix(min_randao_epoch).unwrap(); + state.get_randao_mix(min_randao_epoch - 1).unwrap_err(); + state.get_randao_mix(min_randao_epoch + 1).unwrap(); } From 1219da9a45b531c064a449e2d05fd926111c623b Mon Sep 17 00:00:00 2001 From: Divma Date: Mon, 4 Jul 2022 02:56:13 +0000 Subject: [PATCH 13/15] Simplify error handling after engines fallback removal (#3283) ## Issue Addressed Part of #3118, continuation of #3257 ## Proposed Changes - the [ `first_success_without_retry` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L348-L351) function returns a single error. - the [`first_success`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L324) function returns a single error. - [ `EngineErrors` ](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/lib.rs#L69) carries a single error. 
- [`EngineError`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/engines.rs#L173-L177) now does not need to carry an Id - [`process_multiple_payload_statuses`](https://github.com/sigp/lighthouse/blob/9c429d0764ed91cf56efb8a47a35a556b54a86a4/beacon_node/execution_layer/src/payload_status.rs#L46-L50) now doesn't need to receive an iterator of statuses and weight in different errors ## Additional Info This is built on top of #3294 --- beacon_node/execution_layer/src/engines.rs | 72 ++--- beacon_node/execution_layer/src/lib.rs | 62 ++--- .../execution_layer/src/payload_status.rs | 254 +++++++----------- 3 files changed, 147 insertions(+), 241 deletions(-) diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 88c94162f82..34eef8a3fb7 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -57,7 +57,6 @@ struct PayloadIdCacheKey { /// An execution engine. pub struct Engine { - pub id: String, pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, @@ -65,9 +64,8 @@ pub struct Engine { impl Engine { /// Creates a new, offline engine. - pub fn new(id: String, api: HttpJsonRpc) -> Self { + pub fn new(api: HttpJsonRpc) -> Self { Self { - id, api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), state: RwLock::new(EngineState::Offline), @@ -135,10 +133,10 @@ pub struct Engines { #[derive(Debug)] pub enum EngineError { - Offline { id: String }, - Api { id: String, error: EngineApiError }, + Offline, + Api { error: EngineApiError }, BuilderApi { error: EngineApiError }, - Auth { id: String }, + Auth, } impl Engines { @@ -159,7 +157,6 @@ impl Engines { self.log, "No need to call forkchoiceUpdated"; "msg" => "head does not have execution enabled", - "id" => &self.engine.id, ); return; } @@ -168,7 +165,6 @@ impl Engines { self.log, "Issuing forkchoiceUpdated"; "forkchoice_state" => ?forkchoice_state, - "id" => &self.engine.id, ); // For simplicity, payload attributes are never included in this call. It may be @@ -183,14 +179,12 @@ impl Engines { self.log, "Failed to issue latest head to engine"; "error" => ?e, - "id" => &self.engine.id, ); } } else { debug!( self.log, "No head, not sending to engine"; - "id" => &self.engine.id, ); } } @@ -261,45 +255,36 @@ impl Engines { } } - /// Run `func` on all engines, in the order in which they are defined, returning the first - /// successful result that is found. + /// Run `func` on the node. /// - /// This function might try to run `func` twice. If all nodes return an error on the first time - /// it runs, it will try to upcheck all offline nodes and then run the function again. - pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result> + /// This function might try to run `func` twice. If the node returns an error it will try to + /// upcheck it and then run the function again. + pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result where F: Fn(&'a Engine) -> G + Copy, G: Future>, { match self.first_success_without_retry(func).await { Ok(result) => Ok(result), - Err(mut first_errors) => { - // Try to recover some nodes. + Err(e) => { + debug!(self.log, "First engine call failed. Retrying"; "err" => ?e); + // Try to recover the node. self.upcheck_not_synced(Logging::Enabled).await; - // Retry the call on all nodes. 
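+                // (Callers now receive the single `EngineError` from the retried call,
+                // rather than a `Vec` of errors accumulated across fallback nodes.)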
- match self.first_success_without_retry(func).await { - Ok(result) => Ok(result), - Err(second_errors) => { - first_errors.extend(second_errors); - Err(first_errors) - } - } + // Try again. + self.first_success_without_retry(func).await } } } - /// Run `func` on all engines, in the order in which they are defined, returning the first - /// successful result that is found. + /// Run `func` on the node. pub async fn first_success_without_retry<'a, F, G, H>( &'a self, func: F, - ) -> Result> + ) -> Result where F: Fn(&'a Engine) -> G, G: Future>, { - let mut errors = vec![]; - let (engine_synced, engine_auth_failed) = { let state = self.engine.state.read().await; ( @@ -309,32 +294,22 @@ impl Engines { }; if engine_synced { match func(&self.engine).await { - Ok(result) => return Ok(result), + Ok(result) => Ok(result), Err(error) => { debug!( self.log, "Execution engine call failed"; "error" => ?error, - "id" => &&self.engine.id ); *self.engine.state.write().await = EngineState::Offline; - errors.push(EngineError::Api { - id: self.engine.id.clone(), - error, - }) + Err(EngineError::Api { error }) } } } else if engine_auth_failed { - errors.push(EngineError::Auth { - id: self.engine.id.clone(), - }) + Err(EngineError::Auth) } else { - errors.push(EngineError::Offline { - id: self.engine.id.clone(), - }) + Err(EngineError::Offline) } - - Err(errors) } /// Runs `func` on the node. @@ -363,9 +338,7 @@ impl Engines { { let func = &func; if *self.engine.state.read().await == EngineState::Offline { - Err(EngineError::Offline { - id: self.engine.id.clone(), - }) + Err(EngineError::Offline) } else { match func(&self.engine).await { Ok(res) => Ok(res), @@ -376,10 +349,7 @@ impl Engines { "error" => ?error, ); *self.engine.state.write().await = EngineState::Offline; - Err(EngineError::Api { - id: self.engine.id.clone(), - error, - }) + Err(EngineError::Api { error }) } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 61f1c569d43..8897f8f67af 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -12,7 +12,7 @@ pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engines::ForkChoiceState; use engines::{Engine, EngineError, Engines, Logging}; use lru::LruCache; -use payload_status::process_multiple_payload_statuses; +use payload_status::process_payload_status; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; @@ -68,11 +68,10 @@ pub enum Error { NoPayloadBuilder, ApiError(ApiError), Builder(builder_client::Error), - EngineErrors(Vec), + EngineError(Box), NotSynced, ShuttingDown, FeeRecipientUnspecified, - ConsensusFailure, MissingLatestValidHash, InvalidJWTSecret(String), } @@ -200,12 +199,11 @@ impl ExecutionLayer { }?; let engine: Engine = { - let id = execution_url.to_string(); let auth = Auth::new(jwt_key, jwt_id, jwt_version); - debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?secret_file.as_path()); + debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); let api = HttpJsonRpc::::new_with_auth(execution_url, auth) .map_err(Error::ApiError)?; - Engine::::new(id, api) + Engine::::new(api) }; let builder = builder_url @@ -709,7 +707,8 @@ impl ExecutionLayer { }) }) .await - .map_err(Error::EngineErrors) + .map_err(Box::new) + .map_err(Error::EngineError) } /// Maps to the `engine_newPayload` JSON-RPC call. 
@@ -742,16 +741,14 @@ impl ExecutionLayer { "block_number" => execution_payload.block_number, ); - let broadcast_results = self + let broadcast_result = self .engines() .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - process_multiple_payload_statuses( - execution_payload.block_hash, - Some(broadcast_results).into_iter(), - self.log(), - ) + process_payload_status(execution_payload.block_hash, broadcast_result, self.log()) + .map_err(Box::new) + .map_err(Error::EngineError) } /// Register that the given `validator_index` is going to produce a block at `slot`. @@ -879,7 +876,7 @@ impl ExecutionLayer { .set_latest_forkchoice_state(forkchoice_state) .await; - let broadcast_results = self + let broadcast_result = self .engines() .broadcast(|engine| async move { engine @@ -888,13 +885,13 @@ impl ExecutionLayer { }) .await; - process_multiple_payload_statuses( + process_payload_status( head_block_hash, - Some(broadcast_results) - .into_iter() - .map(|result| result.map(|response| response.payload_status)), + broadcast_result.map(|response| response.payload_status), self.log(), ) + .map_err(Box::new) + .map_err(Error::EngineError) } pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { @@ -909,9 +906,6 @@ impl ExecutionLayer { .broadcast(|engine| engine.api.exchange_transition_configuration_v1(local)) .await; - let mut errors = vec![]; - // Having no fallbacks, the id of the used node is 0 - let i = 0usize; match broadcast_result { Ok(remote) => { if local.terminal_total_difficulty != remote.terminal_total_difficulty @@ -922,20 +916,18 @@ impl ExecutionLayer { "Execution client config mismatch"; "msg" => "ensure lighthouse and the execution client are up-to-date and \ configured consistently", - "execution_endpoint" => i, "remote" => ?remote, "local" => ?local, ); - errors.push(EngineError::Api { - id: i.to_string(), + Err(Error::EngineError(Box::new(EngineError::Api { error: ApiError::TransitionConfigurationMismatch, - }); + }))) } else { debug!( self.log(), "Execution client config is OK"; - "execution_endpoint" => i ); + Ok(()) } } Err(e) => { @@ -943,17 +935,10 @@ impl ExecutionLayer { self.log(), "Unable to get transition config"; "error" => ?e, - "execution_endpoint" => i, ); - errors.push(e); + Err(Error::EngineError(Box::new(e))) } } - - if errors.is_empty() { - Ok(()) - } else { - Err(Error::EngineErrors(errors)) - } } /// Used during block production to determine if the merge has been triggered. @@ -992,7 +977,8 @@ impl ExecutionLayer { .await }) .await - .map_err(Error::EngineErrors)?; + .map_err(Box::new) + .map_err(Error::EngineError)?; if let Some(hash) = &hash_opt { info!( @@ -1102,7 +1088,8 @@ impl ExecutionLayer { Ok(None) }) .await - .map_err(|e| Error::EngineErrors(vec![e])) + .map_err(Box::new) + .map_err(Error::EngineError) } /// This function should remain internal. 
@@ -1160,7 +1147,8 @@ impl ExecutionLayer { .await }) .await - .map_err(Error::EngineErrors) + .map_err(Box::new) + .map_err(Error::EngineError) } async fn get_payload_by_block_hash_from_engine( diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index e0b1a01b43a..46917a0aa50 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -1,7 +1,6 @@ use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Status}; use crate::engines::EngineError; -use crate::Error; -use slog::{crit, warn, Logger}; +use slog::{warn, Logger}; use types::ExecutionBlockHash; /// Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. @@ -24,168 +23,117 @@ pub enum PayloadStatus { }, } -/// Processes the responses from multiple execution engines, finding the "best" status and returning -/// it (if any). -/// -/// This function has the following basic goals: -/// -/// - Detect a consensus failure between nodes. -/// - Find the most-synced node by preferring a definite response (valid/invalid) over a -/// syncing/accepted response or error. -/// -/// # Details -/// -/// - If there are conflicting valid/invalid responses, always return an error. -/// - If there are syncing/accepted responses but valid/invalid responses exist, return the -/// valid/invalid responses since they're definite. -/// - If there are multiple valid responses, return the first one processed. -/// - If there are multiple invalid responses, return the first one processed. -/// - Syncing/accepted responses are grouped, if there are multiple of them, return the first one -/// processed. -/// - If there are no responses (only errors or nothing), return an error. -pub fn process_multiple_payload_statuses( +/// Processes the response from the execution engine. +pub fn process_payload_status( head_block_hash: ExecutionBlockHash, - statuses: impl Iterator>, + status: Result, log: &Logger, -) -> Result { - let mut errors = vec![]; - let mut valid_statuses = vec![]; - let mut invalid_statuses = vec![]; - let mut other_statuses = vec![]; - - for status in statuses { - match status { - Err(e) => errors.push(e), - Ok(response) => match &response.status { - PayloadStatusV1Status::Valid => { - if response - .latest_valid_hash - .map_or(false, |h| h == head_block_hash) - { - // The response is only valid if `latest_valid_hash` is not `null` and - // equal to the provided `block_hash`. - valid_statuses.push(PayloadStatus::Valid) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: ApiError::BadResponse( - format!( - "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", - head_block_hash, - response.latest_valid_hash, - ) - ), - }); - } - } - PayloadStatusV1Status::Invalid => { - if let Some(latest_valid_hash) = response.latest_valid_hash { - // The response is only valid if `latest_valid_hash` is not `null`. 
-                        invalid_statuses.push(PayloadStatus::Invalid {
-                            latest_valid_hash,
-                            validation_error: response.validation_error.clone(),
-                        })
-                    } else {
-                        errors.push(EngineError::Api {
-                            id: "unknown".to_string(),
-                            error: ApiError::BadResponse(
-                                "new_payload: response.status = INVALID but null latest_valid_hash"
-                                    .to_string(),
-                            ),
-                        });
-                    }
+) -> Result<PayloadStatus, EngineError> {
+    match status {
+        Err(error) => {
+            warn!(
+                log,
+                "Error whilst processing payload status";
+                "error" => ?error,
+            );
+            Err(error)
+        }
+        Ok(response) => match &response.status {
+            PayloadStatusV1Status::Valid => {
+                if response
+                    .latest_valid_hash
+                    .map_or(false, |h| h == head_block_hash)
+                {
+                    // The response is only valid if `latest_valid_hash` is not `null` and
+                    // equal to the provided `block_hash`.
+                    Ok(PayloadStatus::Valid)
+                } else {
+                    let error = format!(
+                        "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})",
+                        head_block_hash,
+                        response.latest_valid_hash
+                    );
+                    Err(EngineError::Api {
+                        error: ApiError::BadResponse(error),
+                    })
                 }
-                PayloadStatusV1Status::InvalidBlockHash => {
-                    // In the interests of being liberal with what we accept, only raise a
-                    // warning here.
-                    if response.latest_valid_hash.is_some() {
-                        warn!(
-                            log,
-                            "Malformed response from execution engine";
-                            "msg" => "expected a null latest_valid_hash",
-                            "status" => ?response.status
-                        )
-                    }
-
-                    invalid_statuses.push(PayloadStatus::InvalidBlockHash {
+            }
+            PayloadStatusV1Status::Invalid => {
+                if let Some(latest_valid_hash) = response.latest_valid_hash {
+                    // The response is only valid if `latest_valid_hash` is not `null`.
+                    Ok(PayloadStatus::Invalid {
+                        latest_valid_hash,
                         validation_error: response.validation_error.clone(),
-                    });
+                    })
+                } else {
+                    Err(EngineError::Api {
+                        error: ApiError::BadResponse(
+                            "new_payload: response.status = INVALID but null latest_valid_hash"
+                                .to_string(),
+                        ),
+                    })
                 }
-                PayloadStatusV1Status::InvalidTerminalBlock => {
-                    // In the interests of being liberal with what we accept, only raise a
-                    // warning here.
-                    if response.latest_valid_hash.is_some() {
-                        warn!(
-                            log,
-                            "Malformed response from execution engine";
-                            "msg" => "expected a null latest_valid_hash",
-                            "status" => ?response.status
-                        )
-                    }
-
-                    invalid_statuses.push(PayloadStatus::InvalidTerminalBlock {
-                        validation_error: response.validation_error.clone(),
-                    });
+            }
+            PayloadStatusV1Status::InvalidBlockHash => {
+                // In the interests of being liberal with what we accept, only raise a
+                // warning here.
+                if response.latest_valid_hash.is_some() {
+                    warn!(
+                        log,
+                        "Malformed response from execution engine";
+                        "msg" => "expected a null latest_valid_hash",
+                        "status" => ?response.status
+                    )
                 }
-                PayloadStatusV1Status::Syncing => {
-                    // In the interests of being liberal with what we accept, only raise a
-                    // warning here.
-                    if response.latest_valid_hash.is_some() {
-                        warn!(
-                            log,
-                            "Malformed response from execution engine";
-                            "msg" => "expected a null latest_valid_hash",
-                            "status" => ?response.status
-                        )
-                    }
-                    other_statuses.push(PayloadStatus::Syncing)
+                Ok(PayloadStatus::InvalidBlockHash {
+                    validation_error: response.validation_error.clone(),
+                })
+            }
+            PayloadStatusV1Status::InvalidTerminalBlock => {
+                // In the interests of being liberal with what we accept, only raise a
+                // warning here.
+                if response.latest_valid_hash.is_some() {
+                    warn!(
+                        log,
+                        "Malformed response from execution engine";
+                        "msg" => "expected a null latest_valid_hash",
+                        "status" => ?response.status
+                    )
                 }
-                PayloadStatusV1Status::Accepted => {
-                    // In the interests of being liberal with what we accept, only raise a
-                    // warning here.
-                    if response.latest_valid_hash.is_some() {
-                        warn!(
-                            log,
-                            "Malformed response from execution engine";
-                            "msg" => "expected a null latest_valid_hash",
-                            "status" => ?response.status
-                        )
-                    }
-                    other_statuses.push(PayloadStatus::Accepted)
+                Ok(PayloadStatus::InvalidTerminalBlock {
+                    validation_error: response.validation_error.clone(),
+                })
+            }
+            PayloadStatusV1Status::Syncing => {
+                // In the interests of being liberal with what we accept, only raise a
+                // warning here.
+                if response.latest_valid_hash.is_some() {
+                    warn!(
+                        log,
+                        "Malformed response from execution engine";
+                        "msg" => "expected a null latest_valid_hash",
+                        "status" => ?response.status
+                    )
                 }
-            },
-        }
-    }
-    if !valid_statuses.is_empty() && !invalid_statuses.is_empty() {
-        crit!(
-            log,
-            "Consensus failure between execution nodes";
-            "invalid_statuses" => ?invalid_statuses,
-            "valid_statuses" => ?valid_statuses,
-        );
-
-        // Choose to exit and ignore the valid response. This preferences correctness over
-        // liveness.
-        return Err(Error::ConsensusFailure);
-    }
+                Ok(PayloadStatus::Syncing)
+            }
+            PayloadStatusV1Status::Accepted => {
+                // In the interests of being liberal with what we accept, only raise a
+                // warning here.
+                if response.latest_valid_hash.is_some() {
+                    warn!(
+                        log,
+                        "Malformed response from execution engine";
+                        "msg" => "expected a null latest_valid_hash",
+                        "status" => ?response.status
+                    )
+                }
 
-    // Log any errors to assist with troubleshooting.
-    for error in &errors {
-        warn!(
-            log,
-            "Error whilst processing payload status";
-            "error" => ?error,
-        );
+                Ok(PayloadStatus::Accepted)
+            }
+        },
     }
-
-    valid_statuses
-        .first()
-        .or_else(|| invalid_statuses.first())
-        .or_else(|| other_statuses.first())
-        .cloned()
-        .map(Result::Ok)
-        .unwrap_or_else(|| Err(Error::EngineErrors(errors)))
 }

From 1cc8a97d4eb6807ff7d83f4c181f3c0333c3f0cb Mon Sep 17 00:00:00 2001
From: Akihito Nakano
Date: Mon, 4 Jul 2022 02:56:14 +0000
Subject: [PATCH 14/15] Remove unused method in HandlerNetworkContext (#3299)

## Issue Addressed

N/A

## Proposed Changes

Removed the unused method `_send_error_response` from `HandlerNetworkContext`.

---
 beacon_node/network/src/router/processor.rs | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs
index 9d86c3e55a6..ce11cbdcef3 100644
--- a/beacon_node/network/src/router/processor.rs
+++ b/beacon_node/network/src/router/processor.rs
@@ -409,22 +409,6 @@ impl HandlerNetworkContext {
             response,
         })
     }
-
-    /// Sends an error response to the network task.
-    pub fn _send_error_response(
-        &mut self,
-        peer_id: PeerId,
-        id: PeerRequestId,
-        error: RPCResponseErrorCode,
-        reason: String,
-    ) {
-        self.inform_network(NetworkMessage::SendErrorResponse {
-            peer_id,
-            error,
-            id,
-            reason,
-        })
-    }
 }
 
 fn timestamp_now() -> Duration {

From 748475be1ded2c9a29cbd8b985f6ae2980720a36 Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Mon, 4 Jul 2022 02:56:15 +0000
Subject: [PATCH 15/15] Ensure caches are built for block_rewards POST API
 (#3305)

## Issue Addressed

Follow-up to https://github.com/sigp/lighthouse/pull/3290, fixing a caching bug.

## Proposed Changes

Build the committee cache for the new `POST /lighthouse/analysis/block_rewards` API. Due to an unusual quirk of the total active balance cache, the API endpoint would sometimes fail after loading a state from disk which had a current-epoch committee cache _but not_ a total active balance cache. This PR adds calls to build the caches immediately before they're required, and it has been running smoothly with `blockdreamer` for the last few days.

---
 beacon_node/http_api/src/block_rewards.rs | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs
index 05550372101..682828aee48 100644
--- a/beacon_node/http_api/src/block_rewards.rs
+++ b/beacon_node/http_api/src/block_rewards.rs
@@ -56,6 +56,8 @@ pub fn get_block_rewards(
 
     let block_replayer = BlockReplayer::new(state, &chain.spec)
         .pre_block_hook(Box::new(|state, block| {
+            state.build_all_committee_caches(&chain.spec)?;
+
             // Compute block reward.
             let block_reward = chain.compute_block_reward(
                 block.message(),
@@ -154,8 +156,13 @@ pub fn compute_block_rewards(
         );
     }
 
+    let mut state = block_replayer.into_state();
+    state
+        .build_all_committee_caches(&chain.spec)
+        .map_err(beacon_state_error)?;
+
     state_cache
-        .get_or_insert((parent_root, block.slot()), || block_replayer.into_state())
+        .get_or_insert((parent_root, block.slot()), || state)
         .ok_or_else(|| {
             custom_server_error("LRU cache insert should always succeed".into())
         })?
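
To make the invariant behind this last fix concrete, here is a small self-contained sketch of the failure mode and the shape of the fix: rebuild every cache immediately before use, rather than assuming that the presence of one cache implies the presence of the others. All names below (`CachedState`, `build_all_caches`, the balance figures) are invented for illustration and are not Lighthouse types or APIs.

```rust
// Toy model of a state with two lazily built caches, where one can
// legitimately exist without the other (as happens for states loaded
// from disk). Hypothetical stand-ins only, not Lighthouse code.
#[derive(Default)]
struct CachedState {
    /// Stand-in for the current-epoch committee cache.
    committee_balances: Option<Vec<u64>>,
    /// Stand-in for the total active balance cache.
    total_active_balance: Option<u64>,
}

impl CachedState {
    /// The shape of the fix: build all caches, skipping any already built.
    fn build_all_caches(&mut self) {
        let balances = self
            .committee_balances
            .get_or_insert_with(|| vec![32_000_000_000; 2]);
        if self.total_active_balance.is_none() {
            self.total_active_balance = Some(balances.iter().sum());
        }
    }

    /// A consumer that, like the block reward computation, requires the
    /// total active balance cache to be present.
    fn block_reward(&self) -> Result<u64, &'static str> {
        let total = self
            .total_active_balance
            .ok_or("total active balance cache not built")?;
        Ok(total / 1_000_000)
    }
}

fn main() {
    // Simulate the problematic on-disk state: committee cache present,
    // total active balance cache absent.
    let mut state = CachedState {
        committee_balances: Some(vec![32_000_000_000; 2]),
        total_active_balance: None,
    };
    assert!(state.block_reward().is_err()); // the pre-fix failure mode
    state.build_all_caches(); // the fix: build immediately before use
    assert_eq!(state.block_reward(), Ok(64_000));
}
```

The patch above applies this pattern at both points where a state is consumed: inside the `pre_block_hook` before each block is replayed, and on the final replayed state before it is inserted into the LRU state cache.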