From 6621e1d0c5fc8a310bec51a19a7a721f9fb91bb9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 19 Jun 2023 23:53:25 +0000 Subject: [PATCH 01/15] Improve ENR logic for ipv6 (#4395) Currently, the ENR of the node may not be correctly updated when specifying IPv6 fields through the CLI if an ENR already exists on disk. This remedies a bug where we were not checking the IPv6 fields when deciding whether to reuse the on-disk ENR or to update it based on the CLI configuration parameters. --- beacon_node/lighthouse_network/src/discovery/enr.rs | 6 +++++- boot_node/src/cli.rs | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 938e7cfa257..f85c4b3e5cb 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -213,13 +213,17 @@ pub fn build_enr<T: EthSpec>( fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk_enr address if one is not specified (local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4()) + && + (local_enr.ip6().is_none() || local_enr.ip6() == disk_enr.ip6()) // tcp ports must match && local_enr.tcp4() == disk_enr.tcp4() + && local_enr.tcp6() == disk_enr.tcp6() // must match on the same fork && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) - // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, + && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, // otherwise we use a new ENR. This will likely only be true for non-validating nodes && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index b13f47f7524..d7ea5ab0b35 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -102,7 +102,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("network-dir") .value_name("NETWORK_DIR") .long("network-dir") - .help("The directory which contains the enr and it's assoicated private key") + .help("The directory which contains the enr and its associated private key") .takes_value(true) ) } From c76afc6630740216aa8f73eecd2e56f9a833719c Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 20 Jun 2023 05:20:36 +0000 Subject: [PATCH 02/15] Remove legacy `max-skip-slots` checks (#4403) ## Proposed Changes Remove the `max-skip-slots` checks when processing blocks. This was legacy code that was used on the Medalla testnet to sync to the correct fork. With the addition of checkpoint sync, which allows us to sync to any arbitrary fork, this is no longer a necessary feature, so it has been removed for simplicity. ## Additional Notes The CLI flag and the checks for attestation processing have been retained, as they may still be useful for DoS protection.
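For reference, a minimal sketch of the retained skip-slot limit (this is not Lighthouse's actual implementation; it only mirrors the arithmetic of the deleted `check_block_skip_slots` function shown in the diff below, and the slot values are arbitrary illustrations):

```rust
// Sketch of the skip-slot limit that attestation processing retains.
// `None` mirrors the default of `import_max_skip_slots`: no limit applied.
fn exceeds_max_skip_slots(parent_slot: u64, message_slot: u64, max_skip_slots: Option<u64>) -> bool {
    max_skip_slots.map_or(false, |max| message_slot > parent_slot + max)
}

fn main() {
    // With e.g. --max-skip-slots 3, a message 5 slots past its parent is refused.
    assert!(exceeds_max_skip_slots(100, 105, Some(3)));
    // With the flag unset there is no limit and nothing is refused.
    assert!(!exceeds_max_skip_slots(100, 105, None));
}
```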
--- .../beacon_chain/src/block_verification.rs | 35 ------------------- beacon_node/beacon_chain/src/chain_config.rs | 3 +- .../beacon_processor/worker/gossip_methods.rs | 1 - beacon_node/src/cli.rs | 2 +- 4 files changed, 2 insertions(+), 39 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index dba38af9bdd..3cb8fbdb524 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -141,8 +141,6 @@ pub enum BlockError<T: EthSpec> { /// It's unclear if this block is valid, but it cannot be processed without already knowing /// its parent. ParentUnknown(Arc<SignedBeaconBlock<T>>), - /// The block skips too many slots and is a DoS risk. - TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. /// /// ## Peer scoring @@ -786,9 +784,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> { parent_block.root }; - // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent_block.slot, block.message())?; - // We assign to a variable instead of using `if let Some` directly to ensure we drop the // write lock before trying to acquire it again in the `else` clause. let proposer_opt = chain @@ -942,9 +937,6 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> { let (mut parent, block) = load_parent(block_root, block, chain)?; - // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; - let state = cheap_state_advance_to_obtain_committees( &mut parent.pre_state, parent.beacon_state_root, @@ -1135,9 +1127,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { return Err(BlockError::ParentUnknown(block)); } - // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; - /* * Perform cursory checks to see if the block is even worth processing. */ @@ -1492,30 +1481,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> { } } -/// Check that the count of skip slots between the block and its parent does not exceed our maximum -/// value. -/// -/// Whilst this is not part of the specification, we include this to help prevent us from DoS -/// attacks. In times of dire network circumstance, the user can configure the -/// `import_max_skip_slots` value. -fn check_block_skip_slots<T: BeaconChainTypes>( - chain: &BeaconChain<T>, - parent_slot: Slot, - block: BeaconBlockRef<'_, T::EthSpec>, -) -> Result<(), BlockError<T::EthSpec>> { - // Reject any block that exceeds our limit on skipped slots. - if let Some(max_skip_slots) = chain.config.import_max_skip_slots { - if block.slot() > parent_slot + max_skip_slots { - return Err(BlockError::TooManySkippedSlots { - parent_slot, - block_slot: block.slot(), - }); - } - } - - Ok(()) -} - /// Returns `Ok(())` if the block's slot is greater than the anchor block's slot (if any). fn check_block_against_anchor_slot<T: BeaconChainTypes>( block: BeaconBlockRef<'_, T::EthSpec>, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index a74fdced1f6..34a5c9a4ec9 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -17,8 +17,7 @@ pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { - /// Maximum number of slots to skip when importing a consensus message (e.g., block, - /// attestation, etc). + /// Maximum number of slots to skip when importing an attestation.
/// /// If `None`, there is no limit. pub import_max_skip_slots: Option<u64>, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 121a27fecfb..e3cff00103b 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -835,7 +835,6 @@ impl<T: BeaconChainTypes> Worker<T> { | Err(e @ BlockError::NonLinearParentRoots) | Err(e @ BlockError::BlockIsNotLaterThanParent { .. }) | Err(e @ BlockError::InvalidSignature) - | Err(e @ BlockError::TooManySkippedSlots { .. }) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e763d93f82a..206cd3c72f3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -685,7 +685,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("max-skip-slots") .long("max-skip-slots") .help( - "Refuse to skip more than this many slots when processing a block or attestation. \ This prevents nodes on minority forks from wasting our time and disk space, \ but could also cause unnecessary consensus failures, so is disabled by default." ) + "Refuse to skip more than this many slots when processing an attestation. \ This prevents nodes on minority forks from wasting our time and disk space, \ but could also cause unnecessary consensus failures, so is disabled by default." ) From bd6a015fe73f37c77f365d5baa4d955ff6e61f48 Mon Sep 17 00:00:00 2001 From: chonghe Date: Tue, 20 Jun 2023 05:20:37 +0000 Subject: [PATCH 03/15] Update Lighthouse book on Doppelganger Protection (#4418) Revise the page by removing the info on sync committee delay. Also added an FAQ on changing the port. --- book/src/faq.md | 10 +++++++--- book/src/validator-doppelganger.md | 5 ++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/book/src/faq.md b/book/src/faq.md index 5651f108a29..d3e25438a79 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -25,10 +25,11 @@ ## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1) - [I have a low peer count and it is not increasing](#net-peer) - [How do I update lighthouse?](#net-update) -- [Do I need to set up any port mappings (port forwarding)?](#net-port) +- [Do I need to set up any port mappings (port forwarding)?](#net-port-forwarding) - [How can I monitor my validators?](#net-monitor) - [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc) - [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip) +- [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port) ## [Miscellaneous](#miscellaneous-1) @@ -360,7 +361,7 @@ $ docker pull sigp/lighthouse:v1.0.0 If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image) You just need to make sure the code you have checked out is up to date. -### Do I need to set up any port mappings (port forwarding)? +### Do I need to set up any port mappings (port forwarding)? It is not strictly required to open any ports for Lighthouse to connect and participate in the network. Lighthouse should work out-of-the-box. However, if @@ -386,7 +387,7 @@ For these reasons, we recommend that you make your node publicly accessible. Lighthouse supports UPnP.
If you are behind a NAT with a router that supports UPnP, you can simply ensure UPnP is enabled (Lighthouse will inform you in its -initial logs if a route has been established). You can also manually [set up port mappings/port forwarding](./advanced_networking.md/#how-to-open-ports) in your router to your local Lighthouse instance. By default, +initial logs if a route has been established). You can also manually [set up port mappings/port forwarding](./advanced_networking.md#how-to-open-ports) in your router to your local Lighthouse instance. By default, Lighthouse uses port 9000 for both TCP and UDP. Opening both these ports will make your Lighthouse node maximally contactable. @@ -421,6 +422,9 @@ The settings are as follows: ### Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address? No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (which, for common use cases, is not used). +### How to change the TCP/UDP port 9000 that Lighthouse listens on? +Use the flag ```--port <port>``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```. + ## Miscellaneous ### What should I do if I lose my slashing protection database? diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index 6eaddcc7b0b..7ce2868e9b0 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -43,13 +43,12 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign Staying silent and refusing to sign messages will cause the following: - 2-3 missed attestations, incurring penalties and missed rewards. -- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards. - Potentially missed rewards by missing a block proposal (if the validator is an elected block proposer, which is unlikely). The loss of rewards and penalties incurred due to the missed duties will be very small in -dollar-values. Generally, they will equate to around one US dollar (at August 2021 figures) or about -2% of the reward for one validator for one day. Since DP costs so little but can protect a user from +dollar-values. Neglecting block proposals, generally they will equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than +1% of the reward for one validator for one day. Since DP costs so little but can protect a user from slashing, many users will consider this a worthwhile trade-off. The 2-3 epochs of missed duties will be incurred whenever the VC is started (e.g., after an update From 3cac6d9ed50d14e9aec0d90cc0fb1bc484c10605 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Jun 2023 02:14:56 +0000 Subject: [PATCH 04/15] Configure the `validator/register_validator` batch size via the CLI (#4399) ## Issue Addressed NA ## Proposed Changes Adds the `--validator-registration-batch-size` flag to the VC to allow runtime configuration of the number of validators POSTed to the [`validator/register_validator`](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Validator/registerValidator) endpoint.
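To make the batching concrete, here is a rough sketch of the behaviour the flag controls (illustrative only; placeholder integers stand in for the VC's signed registration data, and the real chunking call is visible in the `preparation_service.rs` diff below):

```rust
// Sketch: the VC submits registrations in chunks of `batch_size`, one POST
// to validator/register_validator per chunk.
fn main() {
    let signed: Vec<u32> = (0..1100).collect(); // placeholder registrations
    let batch_size = 500; // the flag's default value
    for (i, batch) in signed.chunks(batch_size).enumerate() {
        println!("request {}: {} registrations", i + 1, batch.len());
    }
    // With 1100 registrations this prints batches of 500, 500 and 100.
}
```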
There are builders (Agnostic and Eden) that are timing out with `registerValidator` requests with ~400 validators, even with a 9 second timeout. Exposing the batch size will help operators tune it to (hopefully) avoid this. This PR should not change the behavior of Lighthouse when the new flag is not provided (i.e., the same default value is used). ## Additional Info NA --- lighthouse/tests/validator_client.rs | 21 +++++++++++++++++++++ validator_client/src/cli.rs | 10 ++++++++++ validator_client/src/config.rs | 9 +++++++++ validator_client/src/lib.rs | 1 + validator_client/src/preparation_service.rs | 21 ++++++++++++++++----- 5 files changed, 57 insertions(+), 5 deletions(-) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 8c1f0477c42..27d7c10e96c 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -499,3 +499,24 @@ fn latency_measurement_service() { assert!(!config.enable_latency_measurement_service); }); } + +#[test] +fn validator_registration_batch_size() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!(config.validator_registration_batch_size, 500); + }); + CommandLineTest::new() + .flag("validator-registration-batch-size", Some("100")) + .run() + .with_config(|config| { + assert_eq!(config.validator_registration_batch_size, 100); + }); +} + +#[test] +#[should_panic] +fn validator_registration_batch_size_zero_value() { + CommandLineTest::new() + .flag("validator-registration-batch-size", Some("0")) + .run(); +} diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 6e199cb1731..436b8eb4d5c 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -333,6 +333,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("true") .takes_value(true), ) + .arg( + Arg::with_name("validator-registration-batch-size") + .long("validator-registration-batch-size") + .value_name("INTEGER") + .help("Defines the number of validators per \ validator/register_validator request sent to the BN. This value \ can be reduced to avoid timeouts from builders.") + .default_value("500") + .takes_value(true), + ) /* * Experimental/development options. */ diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index fa297dcfedd..e0dd12e1005 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -77,6 +77,8 @@ pub struct Config { pub disable_run_on_all: bool, /// Enables a service which attempts to measure latency between the VC and BNs. pub enable_latency_measurement_service: bool, + /// Defines the number of validators per `validator/register_validator` request sent to the BN.
+ pub validator_registration_batch_size: usize, } impl Default for Config { @@ -117,6 +119,7 @@ impl Default for Config { gas_limit: None, disable_run_on_all: false, enable_latency_measurement_service: true, + validator_registration_batch_size: 500, } } } @@ -380,6 +383,12 @@ impl Config { config.enable_latency_measurement_service = parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true); + config.validator_registration_batch_size = + parse_required(cli_args, "validator-registration-batch-size")?; + if config.validator_registration_batch_size == 0 { + return Err("validator-registration-batch-size cannot be 0".to_string()); + } + /* * Experimental */ diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6e4a8da6ac2..60943a260c1 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -487,6 +487,7 @@ impl ProductionValidatorClient { .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) .builder_registration_timestamp_override(config.builder_registration_timestamp_override) + .validator_registration_batch_size(config.validator_registration_batch_size) .build()?; let sync_committee_service = SyncCommitteeService::new( diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 5bd93a50532..7d6e1744c83 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -23,9 +23,6 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; /// Number of epochs to wait before re-submitting validator registration. const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; -/// The number of validator registrations to include per request to the beacon node. -const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500; - /// Builds an `PreparationService`. pub struct PreparationServiceBuilder { validator_store: Option>>, @@ -33,6 +30,7 @@ pub struct PreparationServiceBuilder { beacon_nodes: Option>>, context: Option>, builder_registration_timestamp_override: Option, + validator_registration_batch_size: Option, } impl PreparationServiceBuilder { @@ -43,6 +41,7 @@ impl PreparationServiceBuilder { beacon_nodes: None, context: None, builder_registration_timestamp_override: None, + validator_registration_batch_size: None, } } @@ -74,6 +73,14 @@ impl PreparationServiceBuilder { self } + pub fn validator_registration_batch_size( + mut self, + validator_registration_batch_size: usize, + ) -> Self { + self.validator_registration_batch_size = Some(validator_registration_batch_size); + self + } + pub fn build(self) -> Result, String> { Ok(PreparationService { inner: Arc::new(Inner { @@ -91,6 +98,9 @@ impl PreparationServiceBuilder { .ok_or("Cannot build PreparationService without runtime_context")?, builder_registration_timestamp_override: self .builder_registration_timestamp_override, + validator_registration_batch_size: self.validator_registration_batch_size.ok_or( + "Cannot build PreparationService without validator_registration_batch_size", + )?, validator_registration_cache: RwLock::new(HashMap::new()), }), }) @@ -107,6 +117,7 @@ pub struct Inner { // Used to track unpublished validator registration changes. 
validator_registration_cache: RwLock>, + validator_registration_batch_size: usize, } #[derive(Hash, Eq, PartialEq, Debug, Clone)] @@ -447,7 +458,7 @@ impl PreparationService { } if !signed.is_empty() { - for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) { + for batch in signed.chunks(self.validator_registration_batch_size) { match self .beacon_nodes .first_success( @@ -462,7 +473,7 @@ impl PreparationService { Ok(()) => info!( log, "Published validator registrations to the builder network"; - "count" => registration_data_len, + "count" => batch.len(), ), Err(e) => warn!( log, From 33c942ff035268fb1ff1078707c1e5a4301a902b Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 22 Jun 2023 02:14:57 +0000 Subject: [PATCH 05/15] Add support for updating validator graffiti (#4417) ## Issue Addressed #4386 ## Proposed Changes The original proposal described in the issue adds a new endpoint to support updating validator graffiti, but I realized we already have an endpoint that we use for updating various validator fields in memory and in the validator definitions file, so I think that would be the best place to add this to. ### API endpoint `PATCH lighthouse/validators/{validator_pubkey}` This endpoint updates the graffiti in both the [ validator definition file](https://lighthouse-book.sigmaprime.io/graffiti.html#2-setting-the-graffiti-in-the-validator_definitionsyml) and the in memory `InitializedValidators`. In the next block proposal, the new graffiti will be used. Note that the [`--graffiti-file`](https://lighthouse-book.sigmaprime.io/graffiti.html#1-using-the---graffiti-file-flag-on-the-validator-client) flag has a priority over the validator definitions file, so if the caller attempts to update the graffiti while the `--graffiti-file` flag is present, the endpoint will return an error (Bad request 400). ## Tasks - [x] Add graffiti update support to `PATCH lighthouse/validators/{validator_pubkey}` - [x] Return error if user tries to update graffiti while the `--graffiti-flag` is set - [x] Update Lighthouse book --- book/src/api-vc-endpoints.md | 3 +- book/src/graffiti.md | 24 ++++++ common/eth2/src/lighthouse_vc/http_client.rs | 3 + common/eth2/src/lighthouse_vc/types.rs | 3 + validator_client/src/http_api/mod.rs | 17 +++- validator_client/src/http_api/tests.rs | 84 ++++++++++++++++++- .../src/http_api/tests/keystores.rs | 2 +- .../src/initialized_validators.rs | 18 +++- 8 files changed, 143 insertions(+), 11 deletions(-) diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 406c5b1f0ee..ee0cfd20017 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -426,7 +426,8 @@ Example Response Body ## `PATCH /lighthouse/validators/:voting_pubkey` -Update some values for the validator with `voting_pubkey`. The following example updates a validator from `enabled: true` to `enabled: false` +Update some values for the validator with `voting_pubkey`. Possible fields: `enabled`, `gas_limit`, `builder_proposals`, +and `graffiti`. The following example updates a validator from `enabled: true` to `enabled: false`. ### HTTP Specification diff --git a/book/src/graffiti.md b/book/src/graffiti.md index 75c2a86dd5d..302f8f96795 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -29,6 +29,8 @@ Lighthouse will first search for the graffiti corresponding to the public key of ### 2. Setting the graffiti in the `validator_definitions.yml` Users can set validator specific graffitis in `validator_definitions.yml` with the `graffiti` key. 
This option is recommended for static setups where the graffitis won't change on every new block proposal. +You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). + Below is an example of the validator_definitions.yml with validator specific graffitis: ``` --- @@ -62,3 +64,25 @@ Usage: `lighthouse bn --graffiti fortytwo` > 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client. > 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node. > 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti. + +### Set Graffiti via HTTP + +Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti +both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal +without requiring a validator client restart. + +Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification. + +#### Example Command + +```bash +DATADIR=/var/lib/lighthouse +curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \ +-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ +-H "Content-Type: application/json" \ +-d '{ + "graffiti": "Mr F was here" +}' | jq +``` + +A `null` response indicates that the request is successful. \ No newline at end of file diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index e576cfcb363..720d8c77952 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -16,6 +16,7 @@ use std::path::Path; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; +use types::graffiti::GraffitiString; /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`). 
@@ -467,6 +468,7 @@ impl ValidatorClientHttpClient { enabled: Option, gas_limit: Option, builder_proposals: Option, + graffiti: Option, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -482,6 +484,7 @@ impl ValidatorClientHttpClient { enabled, gas_limit, builder_proposals, + graffiti, }, ) .await diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index dd2ed03221b..7bbe041dbdb 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -83,6 +83,9 @@ pub struct ValidatorPatchRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub graffiti: Option, } #[derive(Clone, PartialEq, Serialize, Deserialize)] diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index fa6cde3ed54..f08c8da1bd9 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -357,7 +357,7 @@ pub fn serve( .and(warp::path("graffiti")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(graffiti_file_filter) + .and(graffiti_file_filter.clone()) .and(graffiti_flag_filter) .and(signer.clone()) .and(log_filter.clone()) @@ -617,18 +617,27 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_store_filter.clone()) + .and(graffiti_file_filter) .and(signer.clone()) .and(task_executor_filter.clone()) .and_then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, validator_store: Arc>, + graffiti_file: Option, signer, task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { + if body.graffiti.is_some() && graffiti_file.is_some() { + return Err(warp_utils::reject::custom_bad_request( + "Unable to update graffiti as the \"--graffiti-file\" flag is set" + .to_string(), + )); + } + + let maybe_graffiti = body.graffiti.clone().map(Into::into); let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); - match ( initialized_validators.is_enabled(&validator_pubkey), initialized_validators.validator(&validator_pubkey.compress()), @@ -641,7 +650,8 @@ pub fn serve( if Some(is_enabled) == body.enabled && initialized_validator.get_gas_limit() == body.gas_limit && initialized_validator.get_builder_proposals() - == body.builder_proposals => + == body.builder_proposals + && initialized_validator.get_graffiti() == maybe_graffiti => { Ok(()) } @@ -654,6 +664,7 @@ pub fn serve( body.enabled, body.gas_limit, body.builder_proposals, + body.graffiti, ), ) .map_err(|e| { diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 84d2fe437d5..dbb9d4d620c 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -28,12 +28,14 @@ use slot_clock::{SlotClock, TestingSlotClock}; use std::future::Future; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::{tempdir, TempDir}; use tokio::runtime::Runtime; use tokio::sync::oneshot; +use types::graffiti::GraffitiString; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -533,7 +535,7 @@ impl ApiTester { let validator = 
&self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None) + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None, None) .await .unwrap(); @@ -575,7 +577,13 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None) + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + Some(gas_limit), + None, + None, + ) .await .unwrap(); @@ -602,6 +610,7 @@ impl ApiTester { None, None, Some(builder_proposals), + None, ) .await .unwrap(); @@ -620,6 +629,34 @@ impl ApiTester { self } + + pub async fn set_graffiti(self, index: usize, graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + None, + Some(graffiti_str), + ) + .await + .unwrap(); + + self + } + + pub async fn assert_graffiti(self, index: usize, graffiti: &str) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + let graffiti_str = GraffitiString::from_str(graffiti).unwrap(); + assert_eq!( + self.validator_store.graffiti(&validator.voting_pubkey), + Some(graffiti_str.into()) + ); + + self + } } struct HdValidatorScenario { @@ -723,7 +760,13 @@ fn routes_with_invalid_auth() { .await .test_with_invalid_auth(|client| async move { client - .patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None) + .patch_lighthouse_validators( + &PublicKeyBytes::empty(), + Some(false), + None, + None, + None, + ) .await }) .await @@ -931,6 +974,41 @@ fn validator_builder_proposals() { }); } +#[test] +fn validator_graffiti() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here") + .await + .assert_graffiti(0, "Mr F was here") + .await + // Test setting graffiti while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_graffiti(0, "Mr F was here again") + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_graffiti(0, "Mr F was here again") + .await + }); +} + #[test] fn keystore_validator_creation() { let runtime = build_runtime(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 769d8a1d49c..7120ee5f9fb 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -468,7 +468,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, Some(false), None, None) + .patch_lighthouse_validators(pubkey, Some(false), None, None, None) .await .unwrap(); } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 468fc2b06b2..090acbe969b 100644 --- 
a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -27,6 +27,7 @@ use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; +use types::graffiti::GraffitiString; use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; use url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; @@ -147,6 +148,10 @@ impl InitializedValidator { pub fn get_index(&self) -> Option { self.index } + + pub fn get_graffiti(&self) -> Option { + self.graffiti + } } fn open_keystore(path: &Path) -> Result { @@ -671,8 +676,8 @@ impl InitializedValidators { self.validators.get(public_key) } - /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals` - /// values. + /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, + /// `builder_proposals`, and `graffiti` values. /// /// ## Notes /// @@ -682,7 +687,7 @@ impl InitializedValidators { /// /// If a `gas_limit` is included in the call to this function, it will also be updated and saved /// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition` - /// or `InitializedValidator`. The same logic applies to `builder_proposals`. + /// or `InitializedValidator`. The same logic applies to `builder_proposals` and `graffiti`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. pub async fn set_validator_definition_fields( @@ -691,6 +696,7 @@ impl InitializedValidators { enabled: Option, gas_limit: Option, builder_proposals: Option, + graffiti: Option, ) -> Result<(), Error> { if let Some(def) = self .definitions @@ -708,6 +714,9 @@ impl InitializedValidators { if let Some(builder_proposals) = builder_proposals { def.builder_proposals = Some(builder_proposals); } + if let Some(graffiti) = graffiti.clone() { + def.graffiti = Some(graffiti); + } } self.update_validators().await?; @@ -723,6 +732,9 @@ impl InitializedValidators { if let Some(builder_proposals) = builder_proposals { val.builder_proposals = Some(builder_proposals); } + if let Some(graffiti) = graffiti { + val.graffiti = Some(graffiti.into()); + } } self.definitions From 6d585b5885955db46d8f3ec897033539b04823ad Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 22 Jun 2023 02:14:58 +0000 Subject: [PATCH 06/15] Add `lint-fix` task to automatically fix some Clippy warnings. (#4419) ## Issue Addressed This PR adds a new `lint-fix` task to automatically fix simple Clippy warnings using `cargo clippy --fix`. Usage: ``` make lint-fix ``` --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8e7f3fc3260..b833686e1b5 100644 --- a/Makefile +++ b/Makefile @@ -170,7 +170,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --workspace --tests -- \ + cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) -- \ -D clippy::fn_to_numeric_cast_any \ -D warnings \ -A clippy::derive_partial_eq_without_eq \ @@ -180,6 +180,10 @@ lint: -A clippy::question-mark \ -A clippy::uninlined-format-args +# Lints the code using Clippy and automatically fix some simple compiler warnings. 
+lint-fix: + EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint + nightly-lint: cp .github/custom/clippy.toml . cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ From cc780aae3e0cb89649086a3b63cb02a4f97f7ae2 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 22 Jun 2023 02:14:59 +0000 Subject: [PATCH 07/15] Bump `openssl` deps (#4421) ## Proposed Changes Bump the `openssl` deps to resolve the `cargo-audit` failure caused by https://rustsec.org/advisories/RUSTSEC-2023-0044.html --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18276b3ea3f..82391837104 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5692,9 +5692,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.52" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags", "cfg-if", @@ -5733,9 +5733,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.87" +version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc", From 448d3ec9b3a7c90c5209aaa1d50091f2b7303dcf Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 27 Jun 2023 01:06:49 +0000 Subject: [PATCH 08/15] Aggregate subsets (#3493) ## Issue Addressed Resolves #3238 ## Proposed Changes Stop de-duplicating aggregates only on an exact match: an aggregated attestation or sync contribution whose aggregation bits are a non-strict subset of an already-observed aggregate for the same data is now treated as already known and dropped, while an incoming superset replaces the stored entry. `ObservedAggregates` stores aggregation bitfields keyed by the attestation-data or contribution-data root instead of a set of aggregate roots. ## Additional Info Adds the `beacon_aggregated_attestation_subsets_total` and `beacon_sync_contribution_subsets_total` metrics to count how often subset aggregates are seen, and bumps `ssz_types` to 0.5.3 for the bitfield `is_subset` helper.
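A small sketch of the subset relation the diff below relies on, assuming the `is_subset` helper from `ssz_types` 0.5.3 (the version this PR pins in `Cargo.lock`); the eight-bit capacity and validator indices are arbitrary illustrations:

```rust
// Sketch of the subset rule the new `ObservedAggregates` logic applies.
use ssz_types::{typenum::U8, BitList};

fn main() {
    // An already-observed aggregate covering validators 0, 1 and 3.
    let mut known = BitList::<U8>::with_capacity(8).unwrap();
    known.set(0, true).unwrap();
    known.set(1, true).unwrap();
    known.set(3, true).unwrap();

    // An incoming aggregate covering only validators 0 and 3: a non-strict
    // subset, so it carries no new information and would be dropped.
    let mut incoming = BitList::<U8>::with_capacity(8).unwrap();
    incoming.set(0, true).unwrap();
    incoming.set(3, true).unwrap();

    assert!(incoming.is_subset(&known)); // dropped as ObserveOutcome::Subset
    assert!(!known.is_subset(&incoming)); // a superset would replace the stored bits
}
```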
--- Cargo.lock | 5 +- beacon_node/beacon_chain/Cargo.toml | 3 +- .../src/attestation_verification.rs | 36 ++-- beacon_node/beacon_chain/src/metrics.rs | 11 + .../beacon_chain/src/observed_aggregates.rs | 198 +++++++++++++----- .../src/sync_committee_verification.rs | 38 +++- .../tests/attestation_verification.rs | 4 +- .../tests/sync_committee_verification.rs | 12 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/http_api/src/lib.rs | 2 +- beacon_node/http_api/src/sync_committees.rs | 2 +- beacon_node/lighthouse_network/Cargo.toml | 2 +- beacon_node/network/Cargo.toml | 2 +- .../beacon_processor/worker/gossip_methods.rs | 4 +- consensus/cached_tree_hash/Cargo.toml | 2 +- consensus/state_processing/Cargo.toml | 2 +- consensus/types/Cargo.toml | 2 +- 17 files changed, 235 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82391837104..700aecb83aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -661,6 +661,7 @@ dependencies = [ "tokio", "tokio-stream", "tree_hash", + "tree_hash_derive", "types", "unused_port", ] @@ -7849,9 +7850,9 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8052a1004e979c0be24b9e55940195553103cc57d0b34f7e2c4e32793325e402" +checksum = "e43767964a80b2fdeda7a79a57a2b6cbca966688d5b81da8fe91140a94f552a1" dependencies = [ "arbitrary", "derivative", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 27d07e33385..7f884f561b3 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -32,9 +32,10 @@ sloggers = { version = "2.1.1", features = ["json"] } slot_clock = { path = "../../common/slot_clock" } ethereum_hashing = "1.0.0-beta.2" ethereum_ssz = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" ethereum_ssz_derive = "0.5.0" state_processing = { path = "../../consensus/state_processing" } +tree_hash_derive = "0.5.0" tree_hash = "0.5.0" types = { path = "../../consensus/types" } tokio = "1.14.0" diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 04f601fad97..6df0758b2e6 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -117,14 +117,14 @@ pub enum Error { /// /// The peer has sent an invalid message. AggregatorPubkeyUnknown(u64), - /// The attestation has been seen before; either in a block, on the gossip network or from a - /// local validator. + /// The attestation or a superset of this attestation's aggregations bits for the same data + /// has been seen before; either in a block, on the gossip network or from a local validator. /// /// ## Peer scoring /// /// It's unclear if this attestation is valid, however we have already observed it and do not /// need to observe it again. - AttestationAlreadyKnown(Hash256), + AttestationSupersetKnown(Hash256), /// There has already been an aggregation observed for this validator, we refuse to process a /// second. 
/// @@ -268,7 +268,7 @@ enum CheckAttestationSignature { struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { signed_aggregate: &'a SignedAggregateAndProof, indexed_attestation: IndexedAttestation, - attestation_root: Hash256, + attestation_data_root: Hash256, } /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can @@ -467,14 +467,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { } // Ensure the valid aggregated attestation has not already been seen locally. - let attestation_root = attestation.tree_hash_root(); + let attestation_data = &attestation.data; + let attestation_data_root = attestation_data.tree_hash_root(); + if chain .observed_attestations .write() - .is_known(attestation, attestation_root) + .is_known_subset(attestation, attestation_data_root) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::AttestationAlreadyKnown(attestation_root)); + metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); + return Err(Error::AttestationSupersetKnown(attestation_data_root)); } let aggregator_index = signed_aggregate.message.aggregator_index; @@ -520,7 +523,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { if attestation.aggregation_bits.is_zero() { Err(Error::EmptyAggregationBitfield) } else { - Ok(attestation_root) + Ok(attestation_data_root) } } @@ -533,7 +536,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { let attestation = &signed_aggregate.message.aggregate; let aggregator_index = signed_aggregate.message.aggregator_index; - let attestation_root = match Self::verify_early_checks(signed_aggregate, chain) { + let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) { Ok(root) => root, Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), }; @@ -568,7 +571,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { Ok(IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_root, + attestation_data_root, }) } } @@ -577,7 +580,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { /// Run the checks that happen after the indexed attestation and signature have been checked. fn verify_late_checks( signed_aggregate: &SignedAggregateAndProof, - attestation_root: Hash256, + attestation_data_root: Hash256, chain: &BeaconChain, ) -> Result<(), Error> { let attestation = &signed_aggregate.message.aggregate; @@ -587,13 +590,14 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { // // It's important to double check that the attestation is not already known, otherwise two // attestations processed at the same time could be published. - if let ObserveOutcome::AlreadyKnown = chain + if let ObserveOutcome::Subset = chain .observed_attestations .write() - .observe_item(attestation, Some(attestation_root)) + .observe_item(attestation, Some(attestation_data_root)) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::AttestationAlreadyKnown(attestation_root)); + metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); + return Err(Error::AttestationSupersetKnown(attestation_data_root)); } // Observe the aggregator so we don't process another aggregate from them. 
@@ -653,7 +657,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { let IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_root, + attestation_data_root, } = signed_aggregate; match check_signature { @@ -677,7 +681,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { CheckAttestationSignature::No => (), }; - if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_root, chain) { + if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) { return Err(SignatureValid(indexed_attestation, e)); } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index d0f695062f3..dff663ded0f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -998,6 +998,17 @@ lazy_static! { "light_client_optimistic_update_verification_success_total", "Number of light client optimistic updates verified for gossip" ); + /* + * Aggregate subset metrics + */ + pub static ref SYNC_CONTRIBUTION_SUBSETS: Result<IntCounter> = try_create_int_counter( "beacon_sync_contribution_subsets_total", "Count of new sync contributions that are subsets of already known aggregates" ); + pub static ref AGGREGATED_ATTESTATION_SUBSETS: Result<IntCounter> = try_create_int_counter( "beacon_aggregated_attestation_subsets_total", "Count of new aggregated attestations that are subsets of already known aggregates" ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index bb0132f5fe8..18a761e29e8 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -1,7 +1,9 @@ //! Provides an `ObservedAggregates` struct which allows us to reject aggregated attestations or //! sync committee contributions if we've already seen them. -use std::collections::HashSet; +use crate::sync_committee_verification::SyncCommitteeData; +use ssz_types::{BitList, BitVector}; +use std::collections::HashMap; use std::marker::PhantomData; use tree_hash::TreeHash; use types::consts::altair::{ use types::slot_data::SlotData; use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; -pub type ObservedSyncContributions<E> = ObservedAggregates<SyncCommitteeContribution<E>, E>; -pub type ObservedAggregateAttestations<E> = ObservedAggregates<Attestation<E>, E>; +pub type ObservedSyncContributions<E> = ObservedAggregates< + SyncCommitteeContribution<E>, + E, + BitVector<<E as EthSpec>::SyncSubcommitteeSize>, +>; +pub type ObservedAggregateAttestations<E> = ObservedAggregates< + Attestation<E>, + E, + BitList<<E as EthSpec>::MaxValidatorsPerCommittee>, +>; /// A trait used to associate capacity constants with the type being stored in `ObservedAggregates`. pub trait Consts { @@ -69,10 +79,81 @@ impl<E: EthSpec> Consts for SyncCommitteeContribution<E> { } +/// A trait for types that implement a behaviour where one object of that type +/// can be a subset/superset of another. +/// This trait allows us to be generic over the aggregate item that we store in the cache that +/// we want to prevent duplicates/subsets for. +pub trait SubsetItem { + /// The item that is stored for later comparison with new incoming aggregate items. + type Item; + + /// Returns `true` if `self` is a non-strict subset of `other` and `false` otherwise.
+ fn is_subset(&self, other: &Self::Item) -> bool; + + /// Returns `true` if `self` is a non-strict superset of `other` and `false` otherwise. + fn is_superset(&self, other: &Self::Item) -> bool; + + /// Returns the item that gets stored in `ObservedAggregates` for later subset + /// comparison with incoming aggregates. + fn get_item(&self) -> Self::Item; + + /// Returns a unique value that keys the object to the item that is being stored + /// in `ObservedAggregates`. + fn root(&self) -> Hash256; +} + +impl<E: EthSpec> SubsetItem for Attestation<E> { + type Item = BitList<E::MaxValidatorsPerCommittee>; + fn is_subset(&self, other: &Self::Item) -> bool { + self.aggregation_bits.is_subset(other) + } + + fn is_superset(&self, other: &Self::Item) -> bool { + other.is_subset(&self.aggregation_bits) + } + + /// Returns the attestation aggregation bits. + fn get_item(&self) -> Self::Item { + self.aggregation_bits.clone() + } + + /// Returns the hash tree root of the attestation data. + fn root(&self) -> Hash256 { + self.data.tree_hash_root() + } +} + +impl<E: EthSpec> SubsetItem for SyncCommitteeContribution<E> { + type Item = BitVector<E::SyncSubcommitteeSize>; + fn is_subset(&self, other: &Self::Item) -> bool { + self.aggregation_bits.is_subset(other) + } + + fn is_superset(&self, other: &Self::Item) -> bool { + other.is_subset(&self.aggregation_bits) + } + + /// Returns the sync contribution aggregation bits. + fn get_item(&self) -> Self::Item { + self.aggregation_bits.clone() + } + + /// Returns the hash tree root of the root, slot and subcommittee index + /// of the sync contribution. + fn root(&self) -> Hash256 { + SyncCommitteeData { + root: self.beacon_block_root, + slot: self.slot, + subcommittee_index: self.subcommittee_index, + } + .tree_hash_root() + } +} + #[derive(Debug, PartialEq)] pub enum ObserveOutcome { - /// This item was already known. - AlreadyKnown, + /// This item is a non-strict subset of an already known item. + Subset, /// This was the first time this item was observed. New, } @@ -94,26 +175,28 @@ pub enum Error { }, } -/// A `HashSet` that contains entries related to some `Slot`. -struct SlotHashSet { - set: HashSet<Hash256>, +/// A `HashMap` that contains entries related to some `Slot`. +struct SlotHashSet<I> { + /// Contains a vector of maximally-sized aggregation bitfields/bitvectors + /// such that no bitfield/bitvector is a subset of any other in the list. + map: HashMap<Hash256, Vec<I>>, slot: Slot, max_capacity: usize, } -impl SlotHashSet { +impl<I> SlotHashSet<I> { pub fn new(slot: Slot, initial_capacity: usize, max_capacity: usize) -> Self { Self { slot, - set: HashSet::with_capacity(initial_capacity), + map: HashMap::with_capacity(initial_capacity), max_capacity, } } /// Store the items in self so future observations recognise its existence. - pub fn observe_item<T: SlotData>( + pub fn observe_item<S: SubsetItem<Item = I> + SlotData>( &mut self, - item: &T, + item: &S, root: Hash256, ) -> Result<ObserveOutcome, Error> { if item.get_slot() != self.slot { return Err(Error::IncorrectSlot { }); } - if self.set.contains(&root) { - Ok(ObserveOutcome::AlreadyKnown) - } else { - // Here we check to see if this slot has reached the maximum observation count. - // - // The resulting behaviour is that we are no longer able to successfully observe new - // items, however we will continue to return `is_known` values. We could also - // disable `is_known`, however then we would stop forwarding items across the - // gossip network and I think that this is a worse case than sending some invalid ones. - // The underlying libp2p network is responsible for removing duplicate messages, so - // this doesn't risk a broadcast loop.
- if self.set.len() >= self.max_capacity { - return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); + if let Some(aggregates) = self.map.get_mut(&root) { + for existing in aggregates { + // Check if `item` is a subset of any of the observed aggregates + if item.is_subset(existing) { + return Ok(ObserveOutcome::Subset); + // Check if `item` is a superset of any of the observed aggregates + // If true, we replace the new item with its existing subset. This allows us + // to hold fewer items in the list. + } else if item.is_superset(existing) { + *existing = item.get_item(); + return Ok(ObserveOutcome::New); + } } + } - self.set.insert(root); - - Ok(ObserveOutcome::New) + // Here we check to see if this slot has reached the maximum observation count. + // + // The resulting behaviour is that we are no longer able to successfully observe new + // items, however we will continue to return `is_known_subset` values. We could also + // disable `is_known_subset`, however then we would stop forwarding items across the + // gossip network and I think that this is a worse case than sending some invalid ones. + // The underlying libp2p network is responsible for removing duplicate messages, so + // this doesn't risk a broadcast loop. + if self.map.len() >= self.max_capacity { + return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); } + + let item = item.get_item(); + self.map.entry(root).or_default().push(item); + Ok(ObserveOutcome::New) } - /// Indicates if `item` has been observed before. - pub fn is_known<T: SlotData>(&self, item: &T, root: Hash256) -> Result<bool, Error> { + /// Check if `item` is a non-strict subset of any of the already observed aggregates for + /// the given root and slot. + pub fn is_known_subset<S: SubsetItem<Item = I> + SlotData>( + &self, + item: &S, + root: Hash256, + ) -> Result<bool, Error> { if item.get_slot() != self.slot { return Err(Error::IncorrectSlot { expected: self.slot, }); } - Ok(self.set.contains(&root)) + Ok(self + .map + .get(&root) + .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) } /// The number of observed items in `self`. pub fn len(&self) -> usize { - self.set.len() + self.map.len() } } /// Stores the roots of objects for some number of `Slots`, so we can determine if /// these have previously been seen on the network. -pub struct ObservedAggregates<T: TreeHash + SlotData + Consts, E: EthSpec> { +pub struct ObservedAggregates<T: SlotData + Consts, E: EthSpec, I> { lowest_permissible_slot: Slot, - sets: Vec<SlotHashSet>, + sets: Vec<SlotHashSet<I>>, _phantom_spec: PhantomData<E>, _phantom_tree_hash: PhantomData<T>, } -impl<T: TreeHash + SlotData + Consts, E: EthSpec> Default for ObservedAggregates<T, E> { +impl<T: SlotData + Consts, E: EthSpec, I> Default for ObservedAggregates<T, E, I> { fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), sets: vec![], _phantom_spec: PhantomData, _phantom_tree_hash: PhantomData, } } } -impl<T: TreeHash + SlotData + Consts, E: EthSpec> ObservedAggregates<T, E> { - /// Store the root of `item` in `self`. - /// - /// `root` must equal `item.tree_hash_root()`. +impl<T: SlotData + Consts + SubsetItem<Item = I>, E: EthSpec, I> ObservedAggregates<T, E, I> { + /// Store `item` in `self` keyed at `root`. + /// + /// `root` must equal `item.root()`. pub fn observe_item( &mut self, item: &T, root_opt: Option<Hash256>, ) -> Result<ObserveOutcome, Error> { let index = self.get_set_index(item.get_slot())?; - let root = root_opt.unwrap_or_else(|| item.tree_hash_root()); + let root = root_opt.unwrap_or_else(|| item.root()); self.sets .get_mut(index) .and_then(|set| set.observe_item(item, root)) } - /// Check to see if the `root` of `item` is in self. + /// Check if `item` is a non-strict subset of any of the already observed aggregates for + /// the given root and slot. /// - /// `root` must equal `a.tree_hash_root()`.
+ /// `root` must equal `item.root::()`. #[allow(clippy::wrong_self_convention)] - pub fn is_known(&mut self, item: &T, root: Hash256) -> Result { + pub fn is_known_subset(&mut self, item: &T, root: Hash256) -> Result { let index = self.get_set_index(item.get_slot())?; self.sets .get(index) .ok_or(Error::InvalidSetIndex(index)) - .and_then(|set| set.is_known(item, root)) + .and_then(|set| set.is_known_subset(item, root)) } /// The maximum number of slots that items are stored for. @@ -296,7 +399,6 @@ impl ObservedAggregates { #[cfg(not(debug_assertions))] mod tests { use super::*; - use tree_hash::TreeHash; use types::{test_utils::test_random_instance, Hash256}; type E = types::MainnetEthSpec; @@ -330,7 +432,7 @@ mod tests { for a in &items { assert_eq!( - store.is_known(a, a.tree_hash_root()), + store.is_known_subset(a, a.root()), Ok(false), "should indicate an unknown attestation is unknown" ); @@ -343,13 +445,13 @@ mod tests { for a in &items { assert_eq!( - store.is_known(a, a.tree_hash_root()), + store.is_known_subset(a, a.root()), Ok(true), "should indicate a known attestation is known" ); assert_eq!( - store.observe_item(a, Some(a.tree_hash_root())), - Ok(ObserveOutcome::AlreadyKnown), + store.observe_item(a, Some(a.root())), + Ok(ObserveOutcome::Subset), "should acknowledge an existing attestation" ); } diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 14cdc2400d8..246bb12cc0e 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -37,6 +37,7 @@ use bls::{verify_signature_sets, PublicKeyBytes}; use derivative::Derivative; use safe_arith::ArithError; use slot_clock::SlotClock; +use ssz_derive::{Decode, Encode}; use state_processing::per_block_processing::errors::SyncCommitteeMessageValidationError; use state_processing::signature_sets::{ signed_sync_aggregate_selection_proof_signature_set, signed_sync_aggregate_signature_set, @@ -47,6 +48,7 @@ use std::borrow::Cow; use std::collections::HashMap; use strum::AsRefStr; use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; use types::sync_committee::Error as SyncCommitteeError; @@ -110,14 +112,14 @@ pub enum Error { /// /// The peer has sent an invalid message. AggregatorPubkeyUnknown(u64), - /// The sync contribution has been seen before; either in a block, on the gossip network or from a - /// local validator. + /// The sync contribution or a superset of this sync contribution's aggregation bits for the same data + /// has been seen before; either in a block on the gossip network or from a local validator. /// /// ## Peer scoring /// /// It's unclear if this sync contribution is valid, however we have already observed it and do not /// need to observe it again. - SyncContributionAlreadyKnown(Hash256), + SyncContributionSupersetKnown(Hash256), /// There has already been an aggregation observed for this validator, we refuse to process a /// second. /// @@ -268,6 +270,14 @@ pub struct VerifiedSyncContribution { participant_pubkeys: Vec, } +/// The sync contribution data. +#[derive(Encode, Decode, TreeHash)] +pub struct SyncCommitteeData { + pub slot: Slot, + pub root: Hash256, + pub subcommittee_index: u64, +} + /// Wraps a `SyncCommitteeMessage` that has been verified for propagation on the gossip network. 
#[derive(Clone)] pub struct VerifiedSyncCommitteeMessage { @@ -314,15 +324,22 @@ impl VerifiedSyncContribution { return Err(Error::AggregatorNotInCommittee { aggregator_index }); }; - // Ensure the valid sync contribution has not already been seen locally. - let contribution_root = contribution.tree_hash_root(); + // Ensure the valid sync contribution or its superset has not already been seen locally. + let contribution_data_root = SyncCommitteeData { + slot: contribution.slot, + root: contribution.beacon_block_root, + subcommittee_index: contribution.subcommittee_index, + } + .tree_hash_root(); + if chain .observed_sync_contributions .write() - .is_known(contribution, contribution_root) + .is_known_subset(contribution, contribution_data_root) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::SyncContributionAlreadyKnown(contribution_root)); + metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); + return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); } // Ensure there has been no other observed aggregate for the given `aggregator_index`. @@ -376,13 +393,14 @@ impl VerifiedSyncContribution { // // It's important to double check that the contribution is not already known, otherwise two // contribution processed at the same time could be published. - if let ObserveOutcome::AlreadyKnown = chain + if let ObserveOutcome::Subset = chain .observed_sync_contributions .write() - .observe_item(contribution, Some(contribution_root)) + .observe_item(contribution, Some(contribution_data_root)) .map_err(|e| Error::BeaconChainError(e.into()))? { - return Err(Error::SyncContributionAlreadyKnown(contribution_root)); + metrics::inc_counter(&metrics::SYNC_CONTRIBUTION_SUBSETS); + return Err(Error::SyncContributionSupersetKnown(contribution_data_root)); } // Observe the aggregator so we don't process another aggregate from them. diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 1040521e5a7..5cea51090b9 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -699,8 +699,8 @@ async fn aggregated_gossip_verification() { |tester, err| { assert!(matches!( err, - AttnError::AttestationAlreadyKnown(hash) - if hash == tester.valid_aggregate.message.aggregate.tree_hash_root() + AttnError::AttestationSupersetKnown(hash) + if hash == tester.valid_aggregate.message.aggregate.data.tree_hash_root() )) }, ) diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 4204a51212a..0e4745ff6b8 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -1,6 +1,6 @@ #![cfg(not(debug_assertions))] -use beacon_chain::sync_committee_verification::Error as SyncCommitteeError; +use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; @@ -444,11 +444,17 @@ async fn aggregated_gossip_verification() { * subcommittee index contribution.subcommittee_index. 
*/ + let contribution = &valid_aggregate.message.contribution; + let sync_committee_data = SyncCommitteeData { + slot: contribution.slot, + root: contribution.beacon_block_root, + subcommittee_index: contribution.subcommittee_index, + }; assert_invalid!( "aggregate that has already been seen", valid_aggregate.clone(), - SyncCommitteeError::SyncContributionAlreadyKnown(hash) - if hash == valid_aggregate.message.contribution.tree_hash_root() + SyncCommitteeError::SyncContributionSupersetKnown(hash) + if hash == sync_committee_data.tree_hash_root() ); /* diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 3ed7ba65d6a..a96cfb6cacf 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -23,7 +23,7 @@ bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" ethereum_ssz = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } superstruct = "0.6.0" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 55e00bab340..025b54f326e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2843,7 +2843,7 @@ pub fn serve( // It's reasonably likely that two different validators produce // identical aggregates, especially if they're using the same beacon // node. - Err(AttnError::AttestationAlreadyKnown(_)) => continue, + Err(AttnError::AttestationSupersetKnown(_)) => continue, // If we've already seen this aggregator produce an aggregate, just // skip this one. // diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index c728fbeb14e..07dfb5c988f 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -304,7 +304,7 @@ pub fn process_signed_contribution_and_proofs( } // If we already know the contribution, don't broadcast it or attempt to // further verify it. Return success. - Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, + Err(SyncVerificationError::SyncContributionSupersetKnown(_)) => continue, // If we've already seen this aggregator produce an aggregate, just // skip this one. 
// diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index ca15b5ef2c3..6d056d83505 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" discv5 = { version = "0.3.0", features = ["libp2p"]} unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -ssz_types = "0.5.0" +ssz_types = "0.5.3" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" ethereum_ssz = "0.5.0" diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index c9917289944..aa1827787c6 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -22,7 +22,7 @@ slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.4.2" ethereum_ssz = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index e3cff00103b..185634c3081 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1735,7 +1735,7 @@ impl Worker { "attn_agg_not_in_committee", ); } - AttnError::AttestationAlreadyKnown { .. } => { + AttnError::AttestationSupersetKnown { .. } => { /* * The aggregate attestation has already been observed on the network or in * a block. @@ -2244,7 +2244,7 @@ impl Worker { "sync_bad_aggregator", ); } - SyncCommitteeError::SyncContributionAlreadyKnown(_) + SyncCommitteeError::SyncContributionSupersetKnown(_) | SyncCommitteeError::AggregatorAlreadyKnown(_) => { /* * The sync committee message already been observed on the network or in diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index c2856003bfd..0f43c8890f1 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -ssz_types = "0.5.0" +ssz_types = "0.5.3" ethereum_hashing = "1.0.0-beta.2" ethereum_ssz_derive = "0.5.0" ethereum_ssz = "0.5.0" diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index c16742782c6..f19cd1d29df 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -15,7 +15,7 @@ integer-sqrt = "0.1.5" itertools = "0.10.0" ethereum_ssz = "0.5.0" ethereum_ssz_derive = "0.5.0" -ssz_types = "0.5.0" +ssz_types = "0.5.3" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } tree_hash = "0.5.0" diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 91ad3089f1c..583b940d5f4 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -27,7 +27,7 @@ serde_derive = "1.0.116" slog = "2.5.2" ethereum_ssz = { version = "0.5.0", features = ["arbitrary"] } ethereum_ssz_derive = "0.5.0" -ssz_types = { version = "0.5.0", features = ["arbitrary"] } +ssz_types = { version = "0.5.3", features = ["arbitrary"] } swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } tree_hash = { version = "0.5.0", features = ["arbitrary"] } From 9072acbfa611367e9e88ce1c5ea1bd6b4ca9ea8d Mon 
Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Jun 2023 01:06:50 +0000 Subject: [PATCH 09/15] Tidy formatting of `Reqwest` errors (#4336) ## Issue Addressed NA ## Proposed Changes Implements the `PrettyReqwestError` to wrap a `reqwest::Error` and give nicer `Debug` formatting. It also wraps the `Url` component in a `SensitiveUrl` to avoid leaking sensitive info in logs. ### Before ``` Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Domain("localhost")), port: Some(9999), path: "/eth/v1/node/version", query: None, fragment: None }, source: hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })) }) ``` ### After ``` HttpClient(url: http://localhost:9999/, kind: request, detail: error trying to connect: tcp connect error: Connection refused (os error 61)) ``` ## Additional Info I've also renamed the `Reqwest` error enum variants to `HttpClient`, to give people a better chance at knowing what's going on. Reqwest is pretty odd and looks like a typo. I've implemented it in the `eth2` and `execution_layer` crates. This should affect most logs in the VC and EE-related ones in the BN. I think the last crate that could benefit from the is the `beacon_node/eth1` crate. I haven't updated it in this PR since its error type is not so amenable to it (everything goes into a `String`). I don't have a whole lot of time to jig around with that at the moment and I feel that this PR as it stands is a significant enough improvement to merge on its own. Leaving it as-is is fine for the time being and we can always come back for it later (or implement in-protocol deposits!). --- Cargo.lock | 12 ++++ Cargo.toml | 1 + beacon_node/builder_client/src/lib.rs | 6 +- beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 5 +- common/eth2/Cargo.toml | 5 ++ common/eth2/src/lib.rs | 13 ++-- common/eth2/src/lighthouse.rs | 4 +- common/eth2/src/lighthouse_vc/http_client.rs | 12 ++-- common/pretty_reqwest_error/Cargo.toml | 10 +++ common/pretty_reqwest_error/src/lib.rs | 62 +++++++++++++++++++ common/sensitive_url/src/lib.rs | 2 +- 12 files changed, 113 insertions(+), 20 deletions(-) create mode 100644 common/pretty_reqwest_error/Cargo.toml create mode 100644 common/pretty_reqwest_error/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 700aecb83aa..02922b2d7e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2292,6 +2292,8 @@ dependencies = [ "libsecp256k1", "lighthouse_network", "mediatype", + "mime", + "pretty_reqwest_error", "procinfo", "proto_array", "psutil", @@ -2302,6 +2304,7 @@ dependencies = [ "serde_json", "slashing_protection", "store", + "tokio", "types", ] @@ -2742,6 +2745,7 @@ dependencies = [ "lru 0.7.8", "mev-rs", "parking_lot 0.12.1", + "pretty_reqwest_error", "rand 0.8.5", "reqwest", "sensitive_url", @@ -6204,6 +6208,14 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "pretty_reqwest_error" +version = "0.1.0" +dependencies = [ + "reqwest", + "sensitive_url", +] + [[package]] name = "prettyplease" version = "0.1.25" diff --git a/Cargo.toml b/Cargo.toml index bbe77d2096a..5c39e01ed13 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ members = [ "common/lru_cache", "common/malloc_utils", "common/oneshot_broadcast", + "common/pretty_reqwest_error", "common/sensitive_url", "common/slot_clock", "common/system_health", diff --git a/beacon_node/builder_client/src/lib.rs 
b/beacon_node/builder_client/src/lib.rs index 255c2fdd19b..c78f686d02b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -72,7 +72,7 @@ impl BuilderHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Into::into) } /// Perform a HTTP GET request, returning the `Response` for further processing. @@ -85,7 +85,7 @@ impl BuilderHttpClient { if let Some(timeout) = timeout { builder = builder.timeout(timeout); } - let response = builder.send().await.map_err(Error::Reqwest)?; + let response = builder.send().await.map_err(Error::from)?; ok_or_error(response).await } @@ -114,7 +114,7 @@ impl BuilderHttpClient { if let Some(timeout) = timeout { builder = builder.timeout(timeout); } - let response = builder.json(body).send().await.map_err(Error::Reqwest)?; + let response = builder.json(body).send().await.map_err(Error::from)?; ok_or_error(response).await } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index a96cfb6cacf..2cb28346f54 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -50,3 +50,4 @@ keccak-hash = "0.10.0" hash256-std-hasher = "0.15.2" triehash = "0.8.4" hash-db = "0.15.2" +pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } \ No newline at end of file diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4d2eb565e1c..826294d5ff6 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -10,6 +10,7 @@ pub use ethers_core::types::Transaction; use ethers_core::utils::rlp::{self, Decodable, Rlp}; use http::deposit_methods::RpcError; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; +use pretty_reqwest_error::PrettyReqwestError; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; @@ -32,7 +33,7 @@ pub type PayloadId = [u8; 8]; #[derive(Debug)] pub enum Error { - Reqwest(reqwest::Error), + HttpClient(PrettyReqwestError), Auth(auth::Error), BadResponse(String), RequestFailed(String), @@ -67,7 +68,7 @@ impl From for Error { ) { Error::Auth(auth::Error::InvalidToken) } else { - Error::Reqwest(e) + Error::HttpClient(e.into()) } } } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 4eabd3ff865..d8e1a375fd5 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -27,6 +27,11 @@ futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } slashing_protection = { path = "../../validator_client/slashing_protection", optional = true } mediatype = "0.19.13" +mime = "0.3.16" +pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } + +[dev-dependencies] +tokio = { version = "1.14.0", features = ["full"] } [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.2.2", optional = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index e871efbc2cf..217d3569689 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -19,6 +19,7 @@ use self::types::{Error as ResponseError, *}; use futures::Stream; use futures_util::StreamExt; use lighthouse_network::PeerId; +use pretty_reqwest_error::PrettyReqwestError; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; @@ -39,7 +40,7 @@ pub const CONSENSUS_VERSION_HEADER: &str = "Eth-Consensus-Version"; #[derive(Debug)] pub enum Error { /// The `reqwest` client raised an 
error. - Reqwest(reqwest::Error), + HttpClient(PrettyReqwestError), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. @@ -70,7 +71,7 @@ pub enum Error { impl From for Error { fn from(error: reqwest::Error) -> Self { - Error::Reqwest(error) + Error::HttpClient(error.into()) } } @@ -78,7 +79,7 @@ impl Error { /// If the error has a HTTP status code, return it. pub fn status(&self) -> Option { match self { - Error::Reqwest(error) => error.status(), + Error::HttpClient(error) => error.inner().status(), Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), @@ -278,7 +279,7 @@ impl BeaconNodeHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Into::into) } /// Perform a HTTP POST request with a custom timeout. @@ -303,7 +304,7 @@ impl BeaconNodeHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Error::from) } /// Generic POST function supporting arbitrary responses and timeouts. @@ -1645,7 +1646,7 @@ impl BeaconNodeHttpClient { .bytes_stream() .map(|next| match next { Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()), - Err(e) => Err(Error::Reqwest(e)), + Err(e) => Err(Error::HttpClient(e.into())), })) } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index bb933dbe121..1b4bcc0e395 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -364,12 +364,12 @@ pub struct DatabaseInfo { impl BeaconNodeHttpClient { /// Perform a HTTP GET request, returning `None` on a 404 error. async fn get_bytes_opt(&self, url: U) -> Result>, Error> { - let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + let response = self.client.get(url).send().await.map_err(Error::from)?; match ok_or_error(response).await { Ok(resp) => Ok(Some( resp.bytes() .await - .map_err(Error::Reqwest)? + .map_err(Error::from)? .into_iter() .collect::>(), )), diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 720d8c77952..cd7873c9b63 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -170,7 +170,7 @@ impl ValidatorClientHttpClient { .map_err(|_| Error::InvalidSignatureHeader)? .to_string(); - let body = response.bytes().await.map_err(Error::Reqwest)?; + let body = response.bytes().await.map_err(Error::from)?; let message = Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); @@ -222,7 +222,7 @@ impl ValidatorClientHttpClient { .headers(self.headers()?) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; ok_or_error(response).await } @@ -236,7 +236,7 @@ impl ValidatorClientHttpClient { .await? .json() .await - .map_err(Error::Reqwest) + .map_err(Error::from) } /// Perform a HTTP GET request, returning `None` on a 404 error. 
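These call sites all funnel into the new `From<reqwest::Error>` conversion. A minimal standalone sketch of what that buys at log time — assuming a beacon node URL with nothing listening, so `send()` fails with a connect error:

```
use pretty_reqwest_error::PrettyReqwestError;

#[tokio::main]
async fn main() {
    // Point at a port with no listener so the request fails to connect.
    let err = reqwest::Client::new()
        .get("http://localhost:9999/eth/v1/node/version")
        .send()
        .await
        .unwrap_err();

    // Wrapping before logging produces the compact `Debug` form from the
    // commit message, e.g. `url: http://localhost:9999/, kind: request, detail: ...`,
    // instead of the verbose nested `reqwest::Error` dump.
    let pretty = PrettyReqwestError::from(err);
    println!("{:?}", pretty);
}
```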
@@ -266,7 +266,7 @@ impl ValidatorClientHttpClient { .json(body) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; ok_or_error(response).await } @@ -297,7 +297,7 @@ impl ValidatorClientHttpClient { .json(body) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; let response = ok_or_error(response).await?; self.signed_body(response).await?; Ok(()) @@ -316,7 +316,7 @@ impl ValidatorClientHttpClient { .json(body) .send() .await - .map_err(Error::Reqwest)?; + .map_err(Error::from)?; ok_or_error(response).await } diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml new file mode 100644 index 00000000000..ca9f4812b0c --- /dev/null +++ b/common/pretty_reqwest_error/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "pretty_reqwest_error" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +reqwest = { version = "0.11.0", features = ["json","stream"] } +sensitive_url = { path = "../sensitive_url" } diff --git a/common/pretty_reqwest_error/src/lib.rs b/common/pretty_reqwest_error/src/lib.rs new file mode 100644 index 00000000000..4c605f38aeb --- /dev/null +++ b/common/pretty_reqwest_error/src/lib.rs @@ -0,0 +1,62 @@ +use sensitive_url::SensitiveUrl; +use std::error::Error as StdError; +use std::fmt; + +pub struct PrettyReqwestError(reqwest::Error); + +impl PrettyReqwestError { + pub fn inner(&self) -> &reqwest::Error { + &self.0 + } +} + +impl fmt::Debug for PrettyReqwestError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(url) = self.0.url() { + if let Ok(url) = SensitiveUrl::new(url.clone()) { + write!(f, "url: {}", url)?; + } else { + write!(f, "url: unable_to_parse")?; + }; + } + + let kind = if self.0.is_builder() { + "builder" + } else if self.0.is_redirect() { + "redirect" + } else if self.0.is_status() { + "status" + } else if self.0.is_timeout() { + "timeout" + } else if self.0.is_request() { + "request" + } else if self.0.is_connect() { + "connect" + } else if self.0.is_body() { + "body" + } else if self.0.is_decode() { + "decode" + } else { + "unknown" + }; + write!(f, ", kind: {}", kind)?; + + if let Some(status) = self.0.status() { + write!(f, ", status_code: {}", status)?; + } + + if let Some(ref source) = self.0.source() { + write!(f, ", detail: {}", source)?; + } else { + write!(f, ", source: unknown")?; + } + + Ok(()) + } +} + +impl From for PrettyReqwestError { + fn from(inner: reqwest::Error) -> Self { + Self(inner) + } +} diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index b6705eb6020..b6068a2dca6 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -75,7 +75,7 @@ impl SensitiveUrl { SensitiveUrl::new(surl) } - fn new(full: Url) -> Result { + pub fn new(full: Url) -> Result { let mut redacted = full.clone(); redacted .path_segments_mut() From ead4e60a76a47ca9221d3fc383a15131a77f8596 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 27 Jun 2023 01:06:51 +0000 Subject: [PATCH 10/15] Schedule Capella for Gnosis chain (#4433) ## Issue Addressed Closes #4422 Implements https://github.com/gnosischain/configs/pull/12 ## Proposed Changes Schedule the Capella fork for Gnosis chain at epoch 648704, August 1st 2023 11:34:20 UTC. 
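The scheduled epoch lines up with the quoted UTC time. A quick sketch of the arithmetic, assuming the Gnosis beacon chain's 5-second slots, 16 slots per epoch, and a genesis time of 1638993340 (none of which are restated in this patch):

```
fn main() {
    // Assumed Gnosis beacon chain parameters.
    let genesis_time: u64 = 1_638_993_340; // 2021-12-08 19:55:40 UTC
    let seconds_per_slot: u64 = 5;
    let slots_per_epoch: u64 = 16;

    let capella_fork_epoch: u64 = 648_704;
    let fork_time = genesis_time + capella_fork_epoch * slots_per_epoch * seconds_per_slot;

    // 1_638_993_340 + 648_704 * 16 * 5 = 1_690_889_660,
    // i.e. 2023-08-01 11:34:20 UTC as stated above.
    assert_eq!(fork_time, 1_690_889_660);
}
```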
--- .../built_in_network_configs/gnosis/config.yaml | 2 +- consensus/types/src/chain_spec.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 95ca9d01080..0fdc159ec2b 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -38,7 +38,7 @@ BELLATRIX_FORK_VERSION: 0x02000064 BELLATRIX_FORK_EPOCH: 385536 # Capella CAPELLA_FORK_VERSION: 0x03000064 -CAPELLA_FORK_EPOCH: 18446744073709551615 +CAPELLA_FORK_EPOCH: 648704 # Sharding SHARDING_FORK_VERSION: 0x03000064 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 5253dcc4b03..59571822309 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -828,7 +828,7 @@ impl ChainSpec { * Capella hard fork params */ capella_fork_version: [0x03, 0x00, 0x00, 0x64], - capella_fork_epoch: None, + capella_fork_epoch: Some(Epoch::new(648704)), max_validators_per_withdrawals_sweep: 8192, /* From 23b06aa51ee2ab9c13a90c7d5e92f8e085b3eafc Mon Sep 17 00:00:00 2001 From: int88 Date: Thu, 29 Jun 2023 09:39:15 +0000 Subject: [PATCH 11/15] avoid relocking head during builder health check (#4323) ## Issue Addressed #4314 ## Proposed Changes avoid relocking head during builder health check ## Additional Info NA --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ceda7222e69..4aea7bc55fc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5699,13 +5699,9 @@ impl BeaconChain { /// Since we are likely calling this during the slot we are going to propose in, don't take into /// account the current slot when accounting for skips. pub fn is_healthy(&self, parent_root: &Hash256) -> Result { + let cached_head = self.canonical_head.cached_head(); // Check if the merge has been finalized. - if let Some(finalized_hash) = self - .canonical_head - .cached_head() - .forkchoice_update_parameters() - .finalized_hash - { + if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { if ExecutionBlockHash::zero() == finalized_hash { return Ok(ChainHealth::PreMerge); } @@ -5732,17 +5728,13 @@ impl BeaconChain { // Check slots at the head of the chain. let prev_slot = current_slot.saturating_sub(Slot::new(1)); - let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips = prev_slot.saturating_sub(cached_head.head_slot()); let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; // Check if finalization is advancing. 
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let epochs_since_finalization = current_epoch.saturating_sub( - self.canonical_head - .cached_head() - .finalized_checkpoint() - .epoch, - ); + let epochs_since_finalization = + current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch); let finalization_check = epochs_since_finalization.as_usize() <= self.config.builder_fallback_epochs_since_finalization; From 1aff082eeaf7ccb66e06153a6db24bb530a4e2af Mon Sep 17 00:00:00 2001 From: Jack McPherson Date: Thu, 29 Jun 2023 12:02:38 +0000 Subject: [PATCH 12/15] Add broadcast validation routes to Beacon Node HTTP API (#4316) ## Issue Addressed - #4293 - #4264 ## Proposed Changes *Changes largely follow those suggested in the main issue*. - Add new routes to HTTP API - `post_beacon_blocks_v2` - `post_blinded_beacon_blocks_v2` - Add new routes to `BeaconNodeHttpClient` - `post_beacon_blocks_v2` - `post_blinded_beacon_blocks_v2` - Define new Eth2 common types - `BroadcastValidation`, enum representing the level of validation to apply to blocks prior to broadcast - `BroadcastValidationQuery`, the corresponding HTTP query string type for the above type - ~~Define `_checked` variants of both `publish_block` and `publish_blinded_block` that enforce a validation level at a type level~~ - Add interactive tests to the `bn_http_api_tests` test target covering each validation level (to their own test module, `broadcast_validation_tests`) - `beacon/blocks` - `broadcast_validation=gossip` - Invalid (400) - Full Pass (200) - Partial Pass (202) - `broadcast_validation=consensus` - Invalid (400) - Only gossip (400) - Only consensus pass (i.e., equivocates) (200) - Full pass (200) - `broadcast_validation=consensus_and_equivocation` - Invalid (400) - Invalid due to early equivocation (400) - Only gossip (400) - Only consensus (400) - Pass (200) - `beacon/blinded_blocks` - `broadcast_validation=gossip` - Invalid (400) - Full Pass (200) - Partial Pass (202) - `broadcast_validation=consensus` - Invalid (400) - Only gossip (400) - ~~Only consensus pass (i.e., equivocates) (200)~~ - Full pass (200) - `broadcast_validation=consensus_and_equivocation` - Invalid (400) - Invalid due to early equivocation (400) - Only gossip (400) - Only consensus (400) - Pass (200) - Add a new trait, `IntoGossipVerifiedBlock`, which allows type-level guarantees to be made as to gossip validity - Modify the structure of the `ObservedBlockProducers` cache from a `(slot, validator_index)` mapping to a `((slot, validator_index), block_root)` mapping - Modify `ObservedBlockProducers::proposer_has_been_observed` to return a `SeenBlock` rather than a boolean on success - Punish gossip peer (low) for submitting equivocating blocks - Rename `BlockError::SlashablePublish` to `BlockError::SlashableProposal` ## Additional Info This PR contains changes that directly modify how blocks are verified within the client. For more context, consult [comments in-thread](https://github.com/sigp/lighthouse/pull/4316#discussion_r1234724202). 
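For illustration, a minimal client-side sketch of the new query parameter. The route and the `broadcast_validation` values come from this PR; the localhost URL and the `block.json` payload are assumptions:

```
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A SignedBeaconBlock serialized as JSON, produced elsewhere.
    let body = std::fs::read_to_string("block.json")?;

    let resp = reqwest::Client::new()
        .post("http://localhost:5052/eth/v2/beacon/blocks")
        .query(&[("broadcast_validation", "consensus_and_equivocation")])
        .header("Content-Type", "application/json")
        .body(body)
        .send()
        .await?;

    // Per the tests above: 200 = passed the requested level and was broadcast,
    // 400 = failed it and was not broadcast; at the default `gossip` level a
    // block that passes gossip checks but fails import returns 202.
    println!("status: {}", resp.status());
    Ok(())
}
```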
Co-authored-by: Michael Sproul --- beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- .../beacon_chain/src/block_verification.rs | 81 +- beacon_node/beacon_chain/src/builder.rs | 1 - beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/lib.rs | 2 +- .../src/observed_block_producers.rs | 225 ++- beacon_node/beacon_chain/src/test_utils.rs | 17 +- .../beacon_chain/tests/block_verification.rs | 19 +- .../tests/payload_invalidation.rs | 14 +- beacon_node/beacon_chain/tests/store_tests.rs | 1 + beacon_node/beacon_chain/tests/tests.rs | 1 + beacon_node/http_api/src/lib.rs | 101 +- beacon_node/http_api/src/publish_blocks.rs | 201 ++- .../tests/broadcast_validation_tests.rs | 1270 +++++++++++++++++ beacon_node/http_api/tests/main.rs | 1 + beacon_node/http_api/tests/tests.rs | 23 +- .../beacon_processor/worker/gossip_methods.rs | 22 +- .../beacon_processor/worker/sync_methods.rs | 24 +- common/eth2/src/lib.rs | 90 ++ common/eth2/src/types.rs | 46 +- consensus/types/src/beacon_block_body.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 1 + 22 files changed, 1965 insertions(+), 183 deletions(-) create mode 100644 beacon_node/http_api/tests/broadcast_validation_tests.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4aea7bc55fc..772e4c1529e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2578,6 +2578,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, notify_execution_layer, + || Ok(()), ) .await { @@ -2666,6 +2667,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, notify_execution_layer: NotifyExecutionLayer, + publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2684,6 +2686,7 @@ impl BeaconChain { &chain, notify_execution_layer, )?; + publish_fn()?; chain .import_execution_pending_block(execution_pending) .await @@ -2725,7 +2728,7 @@ impl BeaconChain { } // The block failed verification. Err(other) => { - trace!( + debug!( self.log, "Beacon block rejected"; "reason" => other.to_string(), diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 3cb8fbdb524..492f492521e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -52,6 +52,7 @@ use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; +use crate::observed_block_producers::SeenBlock; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -181,13 +182,6 @@ pub enum BlockError { /// /// The block is valid and we have already imported a block with this hash. BlockIsAlreadyKnown, - /// A block for this proposer and slot has already been observed. - /// - /// ## Peer scoring - /// - /// The `proposer` has already proposed a block at this slot. The existing block may or may not - /// be equal to the given block. - RepeatProposal { proposer: u64, slot: Slot }, /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. 
/// /// ## Peer scoring @@ -283,6 +277,13 @@ pub enum BlockError { /// problems to worry about than losing peers, and we're doing the network a favour by /// disconnecting. ParentExecutionPayloadInvalid { parent_root: Hash256 }, + /// The block is a slashable equivocation from the proposer. + /// + /// ## Peer scoring + /// + /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so + /// we penalise them with a mid-tolerance error. + Slashable, } /// Returned when block validation failed due to some issue verifying @@ -631,6 +632,40 @@ pub struct ExecutionPendingBlock { pub payload_verification_handle: PayloadVerificationHandle, } +pub trait IntoGossipVerifiedBlock: Sized { + fn into_gossip_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError>; + fn inner(&self) -> Arc>; +} + +impl IntoGossipVerifiedBlock for GossipVerifiedBlock { + fn into_gossip_verified_block( + self, + _chain: &BeaconChain, + ) -> Result, BlockError> { + Ok(self) + } + + fn inner(&self) -> Arc> { + self.block.clone() + } +} + +impl IntoGossipVerifiedBlock for Arc> { + fn into_gossip_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError> { + GossipVerifiedBlock::new(self, chain) + } + + fn inner(&self) -> Arc> { + self.clone() + } +} + /// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. @@ -727,19 +762,6 @@ impl GossipVerifiedBlock { return Err(BlockError::BlockIsAlreadyKnown); } - // Check that we have not already received a block with a valid signature for this slot. - if chain - .observed_block_producers - .read() - .proposer_has_been_observed(block.message()) - .map_err(|e| BlockError::BeaconChainError(e.into()))? - { - return Err(BlockError::RepeatProposal { - proposer: block.message().proposer_index(), - slot: block.slot(), - }); - } - // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. @@ -855,17 +877,16 @@ impl GossipVerifiedBlock { // // It's important to double-check that the proposer still hasn't been observed so we don't // have a race-condition when verifying two blocks simultaneously. - if chain + match chain .observed_block_producers .write() - .observe_proposer(block.message()) + .observe_proposal(block_root, block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? 
{ - return Err(BlockError::RepeatProposal { - proposer: block.message().proposer_index(), - slot: block.slot(), - }); - } + SeenBlock::Slashable => return Err(BlockError::Slashable), + SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown), + SeenBlock::UniqueNonSlashable => {} + }; if block.message().proposer_index() != expected_proposer as u64 { return Err(BlockError::IncorrectBlockProposer { @@ -1101,6 +1122,12 @@ impl ExecutionPendingBlock { chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { + chain + .observed_block_producers + .write() + .observe_proposal(block_root, block.message()) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(parent) = chain .canonical_head .fork_choice_read_lock() diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 84148fbfb18..9bb39396326 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -826,7 +826,6 @@ where observed_sync_aggregators: <_>::default(), // TODO: allow for persisting and loading the pool from disk. observed_block_producers: <_>::default(), - // TODO: allow for persisting and loading the pool from disk. observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e789b54a21b..50bcf426536 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -213,6 +213,7 @@ pub enum BeaconChainError { BlsToExecutionConflictsWithPool, InconsistentFork(InconsistentFork), ProposerHeadForkChoiceError(fork_choice::Error), + UnableToPublish, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d672c168288..c5cf74e179c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -64,7 +64,7 @@ pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{ get_block_root, BlockError, ExecutionPayloadError, GossipVerifiedBlock, - IntoExecutionPendingBlock, + IntoExecutionPendingBlock, IntoGossipVerifiedBlock, }; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index b5995121b99..f76fc537967 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -1,9 +1,10 @@ //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from //! validators that have already produced a block. +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned}; +use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; #[derive(Debug, PartialEq)] pub enum Error { @@ -14,6 +15,12 @@ pub enum Error { ValidatorIndexTooHigh(u64), } +#[derive(Eq, Hash, PartialEq, Debug, Default)] +struct ProposalKey { + slot: Slot, + proposer: u64, +} + /// Maintains a cache of observed `(block.slot, block.proposer)`. 
/// /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you @@ -27,7 +34,7 @@ pub enum Error { /// known_distinct_shufflings` which is much smaller. pub struct ObservedBlockProducers { finalized_slot: Slot, - items: HashMap>, + items: HashMap>, _phantom: PhantomData, } @@ -42,6 +49,24 @@ impl Default for ObservedBlockProducers { } } +pub enum SeenBlock { + Duplicate, + Slashable, + UniqueNonSlashable, +} + +impl SeenBlock { + pub fn proposer_previously_observed(self) -> bool { + match self { + Self::Duplicate | Self::Slashable => true, + Self::UniqueNonSlashable => false, + } + } + pub fn is_slashable(&self) -> bool { + matches!(self, Self::Slashable) + } +} + impl ObservedBlockProducers { /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will /// update `self` so future calls to it indicate that this block is known. @@ -52,16 +77,44 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. - pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn observe_proposal( + &mut self, + block_root: Hash256, + block: BeaconBlockRef<'_, E>, + ) -> Result { self.sanitize_block(block)?; - let did_not_exist = self - .items - .entry(block.slot()) - .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) - .insert(block.proposer_index()); - - Ok(!did_not_exist) + let key = ProposalKey { + slot: block.slot(), + proposer: block.proposer_index(), + }; + + let entry = self.items.entry(key); + + let slashable_proposal = match entry { + Entry::Occupied(mut occupied_entry) => { + let block_roots = occupied_entry.get_mut(); + let newly_inserted = block_roots.insert(block_root); + + let is_equivocation = block_roots.len() > 1; + + if is_equivocation { + SeenBlock::Slashable + } else if !newly_inserted { + SeenBlock::Duplicate + } else { + SeenBlock::UniqueNonSlashable + } + } + Entry::Vacant(vacant_entry) => { + let block_roots = HashSet::from([block_root]); + vacant_entry.insert(block_roots); + + SeenBlock::UniqueNonSlashable + } + }; + + Ok(slashable_proposal) } /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not @@ -72,15 +125,33 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. - pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn proposer_has_been_observed( + &self, + block: BeaconBlockRef<'_, E>, + block_root: Hash256, + ) -> Result { self.sanitize_block(block)?; - let exists = self - .items - .get(&block.slot()) - .map_or(false, |set| set.contains(&block.proposer_index())); - - Ok(exists) + let key = ProposalKey { + slot: block.slot(), + proposer: block.proposer_index(), + }; + + if let Some(block_roots) = self.items.get(&key) { + let block_already_known = block_roots.contains(&block_root); + let no_prev_known_blocks = + block_roots.difference(&HashSet::from([block_root])).count() == 0; + + if !no_prev_known_blocks { + Ok(SeenBlock::Slashable) + } else if block_already_known { + Ok(SeenBlock::Duplicate) + } else { + Ok(SeenBlock::UniqueNonSlashable) + } + } else { + Ok(SeenBlock::UniqueNonSlashable) + } } /// Returns `Ok(())` if the given `block` is sane. 
@@ -112,15 +183,15 @@ impl ObservedBlockProducers { } self.finalized_slot = finalized_slot; - self.items.retain(|slot, _set| *slot > finalized_slot); + self.items.retain(|key, _| key.slot > finalized_slot); } /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`. /// /// This is useful for doppelganger detection. pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool { - self.items.iter().any(|(slot, producers)| { - slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index) + self.items.iter().any(|(key, _)| { + key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index }) } } @@ -148,9 +219,12 @@ mod tests { // Slot 0, proposer 0 let block_a = get_block(0, 0); + let block_root = block_a.canonical_root(); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer, indicates proposer unobserved" ); @@ -164,7 +238,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -182,7 +259,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -207,9 +287,12 @@ mod tests { // First slot of finalized epoch, proposer 0 let block_b = get_block(E::slots_per_epoch(), 0); + let block_root_b = block_b.canonical_root(); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Err(Error::FinalizedBlock { slot: E::slots_per_epoch().into(), finalized_slot: E::slots_per_epoch().into(), @@ -229,7 +312,9 @@ mod tests { let block_b = get_block(three_epochs, 0); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can insert non-finalized block" ); @@ -238,7 +323,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(three_epochs)) + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) .expect("the three epochs slot should be present") .len(), 1, @@ -262,7 +350,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(three_epochs)) + .get(&ProposalKey { + slot: Slot::new(three_epochs), + proposer: 0 + }) .expect("the three epochs slot should be present") .len(), 1, @@ -276,24 +367,33 @@ mod tests { // Slot 0, proposer 0 let block_a = get_block(0, 0); + let block_root_a = block_a.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_a.to_ref()), + cache + .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation in empty cache" ); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + .observe_proposal(block_root_a, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_a.to_ref()), + cache + .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_a.to_ref()), + cache + 
.observe_proposal(block_root_a, block_a.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing again indicates true" ); @@ -303,7 +403,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -312,24 +415,33 @@ mod tests { // Slot 1, proposer 0 let block_b = get_block(1, 0); + let block_root_b = block_b.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_b.to_ref()), + cache + .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation for new slot" ); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe proposer for new slot, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_b.to_ref()), + cache + .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed block in slot 1 is indicated as true" ); assert_eq!( - cache.observe_proposer(block_b.to_ref()), + cache + .observe_proposal(block_root_b, block_b.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing slot 1 again indicates true" ); @@ -339,7 +451,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(0)) + .get(&ProposalKey { + slot: Slot::new(0), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -348,7 +463,10 @@ mod tests { assert_eq!( cache .items - .get(&Slot::new(1)) + .get(&ProposalKey { + slot: Slot::new(1), + proposer: 0 + }) .expect("slot zero should be present") .len(), 1, @@ -357,45 +475,54 @@ mod tests { // Slot 0, proposer 1 let block_c = get_block(0, 1); + let block_root_c = block_c.canonical_root(); assert_eq!( - cache.proposer_has_been_observed(block_c.to_ref()), + cache + .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(false), "no observation for new proposer" ); assert_eq!( - cache.observe_proposer(block_c.to_ref()), + cache + .observe_proposal(block_root_c, block_c.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(false), "can observe new proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_c.to_ref()), + cache + .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root()) + .map(|x| x.proposer_previously_observed()), Ok(true), "observed new proposer block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_c.to_ref()), + cache + .observe_proposal(block_root_c, block_c.to_ref()) + .map(SeenBlock::proposer_previously_observed), Ok(true), "observing new proposer again indicates true" ); assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); - assert_eq!(cache.items.len(), 2, "two slots should be present"); + assert_eq!(cache.items.len(), 3, "three slots should be present"); assert_eq!( cache .items - .get(&Slot::new(0)) - .expect("slot zero should be present") - .len(), + .iter() + .filter(|(k, _)| k.slot == cache.finalized_slot) + .count(), 2, "two proposers should be present in slot 0" ); assert_eq!( cache .items - .get(&Slot::new(1)) - .expect("slot zero should be present") - .len(), + .iter() + .filter(|(k, _)| k.slot == Slot::new(1)) + .count(), 1, "only one proposer should be present in slot 1" ); diff 
--git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 55ea016fbda..21f7248cee5 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -733,6 +733,15 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + pub async fn make_blinded_block( + &self, + state: BeaconState, + slot: Slot, + ) -> (SignedBlindedBeaconBlock, BeaconState) { + let (unblinded, new_state) = self.make_block(state, slot).await; + (unblinded.into(), new_state) + } + /// Returns a newly created block, signed by the proposer for the given slot. pub async fn make_block( &self, @@ -1692,7 +1701,12 @@ where self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain - .process_block(block_root, Arc::new(block), NotifyExecutionLayer::Yes) + .process_block( + block_root, + Arc::new(block), + NotifyExecutionLayer::Yes, + || Ok(()), + ) .await? .into(); self.chain.recompute_head_at_current_slot().await; @@ -1709,6 +1723,7 @@ where block.canonical_root(), Arc::new(block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await? .into(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index a88931367f7..75b00b2b442 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -351,6 +351,7 @@ async fn assert_invalid_signature( snapshots[block_index].beacon_block.canonical_root(), snapshots[block_index].beacon_block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), ) .await; assert!( @@ -415,6 +416,7 @@ async fn invalid_signature_gossip_block() { signed_block.canonical_root(), Arc::new(signed_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await, Err(BlockError::InvalidSignature) @@ -727,6 +729,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .expect("should import valid gossip verified block"); @@ -923,11 +926,7 @@ async fn block_gossip_verification() { assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), - BlockError::RepeatProposal { - proposer, - slot, - } - if proposer == other_proposer && slot == block.message().slot() + BlockError::BlockIsAlreadyKnown, ), "should register any valid signature against the proposer, even if the block failed later verification" ); @@ -956,11 +955,7 @@ async fn block_gossip_verification() { .await .err() .expect("should error when processing known block"), - BlockError::RepeatProposal { - proposer, - slot, - } - if proposer == block.message().proposer_index() && slot == block.message().slot() + BlockError::BlockIsAlreadyKnown ), "the second proposal by this validator should be rejected" ); @@ -998,6 +993,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1037,6 +1033,7 @@ async fn verify_block_for_gossip_doppelganger_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1184,6 +1181,7 @@ async fn add_base_block_to_altair_chain() { base_block.canonical_root(), Arc::new(base_block.clone()), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .err() @@ -1318,6 +1316,7 @@ async fn add_altair_block_to_base_chain() { altair_block.canonical_root(), Arc::new(altair_block.clone()), NotifyExecutionLayer::Yes, + 
|| Ok(()), ) .await .err() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index c39bdeaf366..018defd2f00 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -697,6 +697,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), Arc::new(fork_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -793,6 +794,7 @@ async fn switches_heads() { fork_block.canonical_root(), Arc::new(fork_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); @@ -1046,7 +1048,9 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), + ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1332,7 +1336,12 @@ async fn build_optimistic_chain( for block in blocks { rig.harness .chain - .process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes) + .process_block( + block.canonical_root(), + block, + NotifyExecutionLayer::Yes, + || Ok(()), + ) .await .unwrap(); } @@ -1892,6 +1901,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0bc7798a7ff..29027748259 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2151,6 +2151,7 @@ async fn weak_subjectivity_sync() { full_block.canonical_root(), Arc::new(full_block), NotifyExecutionLayer::Yes, + || Ok(()), ) .await .unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index f97f7069dce..c5b2892cbdb 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -687,6 +687,7 @@ async fn run_skip_slot_test(skip_slots: u64) { harness_a.chain.head_snapshot().beacon_block_root, harness_a.chain.head_snapshot().beacon_block.clone(), NotifyExecutionLayer::Yes, + || Ok(()) ) .await .unwrap(), diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 025b54f326e..93bfe524bc2 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -31,8 +31,8 @@ use beacon_chain::{ pub use block_id::BlockId; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ - self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SkipRandaoVerification, - ValidatorId, ValidatorStatus, + self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, + SkipRandaoVerification, ValidatorId, ValidatorStatus, }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -40,7 +40,9 @@ use logging::SSELoggingComponents; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; -use publish_blocks::ProvenancedBlock; +pub use publish_blocks::{ + publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, +}; use 
serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; @@ -324,6 +326,7 @@ pub fn serve( }; let eth_v1 = single_version(V1); + let eth_v2 = single_version(V2); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -1222,16 +1225,55 @@ pub fn serve( log: Logger| async move { publish_blocks::publish_block( None, - ProvenancedBlock::Local(block), + ProvenancedBlock::local(block), chain, &network_tx, log, + BroadcastValidation::default(), ) .await .map(|()| warp::reply().into_response()) }, ); + let post_beacon_blocks_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block: Arc>, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + match publish_blocks::publish_block( + None, + ProvenancedBlock::local(block), + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } + }, + ); + /* * beacon/blocks */ @@ -1250,9 +1292,52 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - publish_blocks::publish_blinded_block(block, chain, &network_tx, log) - .await - .map(|()| warp::reply().into_response()) + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + BroadcastValidation::default(), + ) + .await + .map(|()| warp::reply().into_response()) + }, + ); + + let post_beacon_blinded_blocks_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + |validation_level: api_types::BroadcastValidationQuery, + block: SignedBeaconBlock>, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| async move { + match publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + log, + validation_level.broadcast_validation, + ) + .await + { + Ok(()) => warp::reply().into_response(), + Err(e) => match warp_utils::reject::handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + StatusCode::INTERNAL_SERVER_ERROR, + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } }, ); @@ -3847,6 +3932,8 @@ pub fn serve( warp::post().and( post_beacon_blocks .uor(post_beacon_blinded_blocks) + .uor(post_beacon_blocks_v2) + .uor(post_beacon_blinded_blocks_v2) .uor(post_beacon_pool_attestations) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 8bcad6ba401..0f2f7b361c7 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,11 +1,16 @@ use crate::metrics; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; -use beacon_chain::{BeaconChain, BeaconChainTypes, 
BlockError, NotifyExecutionLayer};
+use beacon_chain::{
+    BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock,
+    NotifyExecutionLayer,
+};
+use eth2::types::BroadcastValidation;
 use execution_layer::ProvenancedPayload;
 use lighthouse_network::PubsubMessage;
 use network::NetworkMessage;
 use slog::{debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
+use std::marker::PhantomData;
 use std::sync::Arc;
 use std::time::Duration;
 use tokio::sync::mpsc::UnboundedSender;
@@ -16,45 +21,115 @@ use types::{
 };
 use warp::Rejection;
 
-pub enum ProvenancedBlock<T: EthSpec> {
+pub enum ProvenancedBlock<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> {
     /// The payload was built using a local EE.
-    Local(Arc<SignedBeaconBlock<T>>),
+    Local(B, PhantomData<T>),
     /// The payload was built using a remote builder (e.g., via a mev-boost
     /// compatible relay).
-    Builder(Arc<SignedBeaconBlock<T>>),
+    Builder(B, PhantomData<T>),
+}
+
+impl<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>> ProvenancedBlock<T, B> {
+    pub fn local(block: B) -> Self {
+        Self::Local(block, PhantomData)
+    }
+
+    pub fn builder(block: B) -> Self {
+        Self::Builder(block, PhantomData)
+    }
 }
 
 /// Handles a request from the HTTP API for full blocks.
-pub async fn publish_block<T: BeaconChainTypes>(
+pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
     block_root: Option<Hash256>,
-    provenanced_block: ProvenancedBlock<T::EthSpec>,
+    provenanced_block: ProvenancedBlock<T, B>,
     chain: Arc<BeaconChain<T>>,
     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
     log: Logger,
+    validation_level: BroadcastValidation,
 ) -> Result<(), Rejection> {
     let seen_timestamp = timestamp_now();
     let (block, is_locally_built_block) = match provenanced_block {
-        ProvenancedBlock::Local(block) => (block, true),
-        ProvenancedBlock::Builder(block) => (block, false),
+        ProvenancedBlock::Local(block, _) => (block, true),
+        ProvenancedBlock::Builder(block, _) => (block, false),
     };
-    let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
+    let beacon_block = block.inner();
+    let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock);
+    debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot());
 
-    debug!(
-        log,
-        "Signed block published to HTTP API";
-        "slot" => block.slot()
-    );
+    /* actually publish a block */
+    let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
+                              sender,
+                              log,
+                              seen_timestamp| {
+        let publish_timestamp = timestamp_now();
+        let publish_delay = publish_timestamp
+            .checked_sub(seen_timestamp)
+            .unwrap_or_else(|| Duration::from_secs(0));
 
-    // Send the block, regardless of whether or not it is valid. The API
-    // specification is very clear that this is the desired behaviour.
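Worth spelling out why the refactored enum needs `PhantomData`: the `T: BeaconChainTypes` parameter now appears only in the `B: IntoGossipVerifiedBlock<T>` bound rather than in any field, and Rust rejects type parameters that are otherwise unused (error E0392). Below is a minimal, self-contained sketch of the same pattern, using invented stand-in names rather than the real Lighthouse types:

```rust
use std::marker::PhantomData;

trait VerifiableIn<Ctx> {
    fn verify(self, ctx: &Ctx) -> bool;
}

// Without the `PhantomData<Ctx>` field this enum would be rejected with
// E0392: `Ctx` appears only in `B`'s trait bound, not in any field.
#[allow(dead_code)]
enum Provenanced<Ctx, B: VerifiableIn<Ctx>> {
    Local(B, PhantomData<Ctx>),
    Builder(B, PhantomData<Ctx>),
}

impl<Ctx, B: VerifiableIn<Ctx>> Provenanced<Ctx, B> {
    // Constructors hide the marker from callers, mirroring
    // `ProvenancedBlock::local`.
    fn local(block: B) -> Self {
        Self::Local(block, PhantomData)
    }
}

struct Chain;
struct Block(bool);

impl VerifiableIn<Chain> for Block {
    fn verify(self, _ctx: &Chain) -> bool {
        self.0
    }
}

fn main() {
    let provenanced = Provenanced::<Chain, _>::local(Block(true));
    if let Provenanced::Local(block, _) = provenanced {
        assert!(block.verify(&Chain));
    }
}
```

The `local`/`builder` constructors are why the call sites in the HTTP handlers above read `ProvenancedBlock::local(block)` with no marker in sight.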
+ info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay); - let message = PubsubMessage::BeaconBlock(block.clone()); - crate::publish_pubsub_message(network_tx, message)?; + let message = PubsubMessage::BeaconBlock(block); + crate::publish_pubsub_message(&sender, message) + .map_err(|_| BeaconChainError::UnableToPublish.into()) + }; - let block_root = block_root.unwrap_or_else(|| block.canonical_root()); + /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ + let gossip_verified_block = block.into_gossip_verified_block(&chain).map_err(|e| { + warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e); + warp_utils::reject::custom_bad_request(e.to_string()) + })?; + + let block_root = block_root.unwrap_or(gossip_verified_block.block_root); + + if let BroadcastValidation::Gossip = validation_level { + publish_block( + beacon_block.clone(), + network_tx.clone(), + log.clone(), + seen_timestamp, + ) + .map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?; + } + + /* only publish if gossip- and consensus-valid and equivocation-free */ + let chain_clone = chain.clone(); + let block_clone = beacon_block.clone(); + let log_clone = log.clone(); + let sender_clone = network_tx.clone(); + + let publish_fn = move || match validation_level { + BroadcastValidation::Gossip => Ok(()), + BroadcastValidation::Consensus => { + publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + } + BroadcastValidation::ConsensusAndEquivocation => { + if chain_clone + .observed_block_producers + .read() + .proposer_has_been_observed(block_clone.message(), block_root) + .map_err(|e| BlockError::BeaconChainError(e.into()))? + .is_slashable() + { + warn!( + log_clone, + "Not publishing equivocating block"; + "slot" => block_clone.slot() + ); + Err(BlockError::Slashable) + } else { + publish_block(block_clone, sender_clone, log_clone, seen_timestamp) + } + } + }; match chain - .process_block(block_root, block.clone(), NotifyExecutionLayer::Yes) + .process_block( + block_root, + gossip_verified_block, + NotifyExecutionLayer::Yes, + publish_fn, + ) .await { Ok(root) => { @@ -63,14 +138,14 @@ pub async fn publish_block( "Valid block from HTTP API"; "block_delay" => ?delay, "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), + "proposer_index" => beacon_block.message().proposer_index(), + "slot" => beacon_block.slot(), ); // Notify the validator monitor. chain.validator_monitor.read().register_api_block( seen_timestamp, - block.message(), + beacon_block.message(), root, &chain.slot_clock, ); @@ -83,40 +158,44 @@ pub async fn publish_block( // blocks built with builders we consider the broadcast time to be // when the blinded block is published to the builder. 
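The structural change worth noting here is that publication is now passed *into* `BeaconChain::process_block` as a closure (`publish_fn`), so for the stricter validation levels the block is only broadcast after consensus checks succeed. A toy model of that inversion of control follows, using simplified stand-in types; none of these signatures are the real Lighthouse APIs:

```rust
#[derive(Debug)]
enum Error {
    ConsensusInvalid,
    PublishFailed,
}

// A stand-in for `BeaconChain::process_block`: verify first, then run the
// caller-supplied publish hook, and only import if publishing succeeded.
fn process_block<F>(block: &str, publish_fn: F) -> Result<(), Error>
where
    F: FnOnce() -> Result<(), Error>,
{
    if block.is_empty() {
        return Err(Error::ConsensusInvalid); // pretend consensus check
    }
    publish_fn()?; // publication happens between verification and import
    println!("imported {block}");
    Ok(())
}

fn main() {
    // `broadcast_validation=gossip`: the HTTP handler has already published,
    // so the hook is a no-op, like the `|| Ok(())` used by the gossip and
    // sync workers.
    process_block("block_a", || Ok(())).unwrap();

    // `broadcast_validation=consensus`: broadcast only after verification.
    process_block("block_b", || {
        println!("publishing block_b to the network");
        Ok(())
    })
    .unwrap();

    // If the hook reports failure, the block is verified but never imported.
    assert!(process_block("block_c", || Err(Error::PublishFailed)).is_err());
}
```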
if is_locally_built_block { - late_block_logging(&chain, seen_timestamp, block.message(), root, "local", &log) + late_block_logging( + &chain, + seen_timestamp, + beacon_block.message(), + root, + "local", + &log, + ) } Ok(()) } - Err(BlockError::BlockIsAlreadyKnown) => { - info!( - log, - "Block from HTTP API already known"; - "block" => ?block.canonical_root(), - "slot" => block.slot(), - ); - Ok(()) + Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { + Err(warp_utils::reject::custom_server_error( + "unable to publish to network channel".to_string(), + )) } - Err(BlockError::RepeatProposal { proposer, slot }) => { - warn!( - log, - "Block ignored due to repeat proposal"; - "msg" => "this can happen when a VC uses fallback BNs. \ - whilst this is not necessarily an error, it can indicate issues with a BN \ - or between the VC and BN.", - "slot" => slot, - "proposer" => proposer, - ); + Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( + "proposal for this slot and proposer has already been seen".to_string(), + )), + Err(BlockError::BlockIsAlreadyKnown) => { + info!(log, "Block from HTTP API already known"; "block" => ?block_root); Ok(()) } Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) + if let BroadcastValidation::Gossip = validation_level { + Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) + } else { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::custom_bad_request(format!( + "Invalid block: {e}" + ))) + } } } } @@ -128,21 +207,31 @@ pub async fn publish_blinded_block( chain: Arc>, network_tx: &UnboundedSender>, log: Logger, + validation_level: BroadcastValidation, ) -> Result<(), Rejection> { let block_root = block.canonical_root(); - let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; - publish_block::(Some(block_root), full_block, chain, network_tx, log).await + let full_block: ProvenancedBlock>> = + reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; + publish_block::( + Some(block_root), + full_block, + chain, + network_tx, + log, + validation_level, + ) + .await } /// Deconstruct the given blinded block, and construct a full block. This attempts to use the /// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve /// the full payload. 
-async fn reconstruct_block( +pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, block: SignedBeaconBlock>, log: Logger, -) -> Result, Rejection> { +) -> Result>>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -208,15 +297,15 @@ async fn reconstruct_block( None => block .try_into_full_block(None) .map(Arc::new) - .map(ProvenancedBlock::Local), + .map(ProvenancedBlock::local), Some(ProvenancedPayload::Local(full_payload)) => block .try_into_full_block(Some(full_payload)) .map(Arc::new) - .map(ProvenancedBlock::Local), + .map(ProvenancedBlock::local), Some(ProvenancedPayload::Builder(full_payload)) => block .try_into_full_block(Some(full_payload)) .map(Arc::new) - .map(ProvenancedBlock::Builder), + .map(ProvenancedBlock::builder), } .ok_or_else(|| { warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs new file mode 100644 index 00000000000..4819dd99e7a --- /dev/null +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -0,0 +1,1270 @@ +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy}, + GossipVerifiedBlock, +}; +use eth2::types::{BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock}; +use http_api::test_utils::InteractiveTester; +use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; +use tree_hash::TreeHash; +use types::{Hash256, MainnetEthSpec, Slot}; +use warp::Rejection; +use warp_utils::reject::CustomBadRequest; + +use eth2::reqwest::StatusCode; + +type E = MainnetEthSpec; + +/* + * We have the following test cases, which are duplicated for the blinded variant of the route: + * + * - `broadcast_validation=gossip` + * - Invalid (400) + * - Full Pass (200) + * - Partial Pass (202) + * - `broadcast_validation=consensus` + * - Invalid (400) + * - Only gossip (400) + * - Only consensus pass (i.e., equivocates) (200) + * - Full pass (200) + * - `broadcast_validation=consensus_and_equivocation` + * - Invalid (400) + * - Invalid due to early equivocation (400) + * - Only gossip (400) + * - Only consensus (400) + * - Pass (200) + * + */ + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_partial_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::random() + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response = response.unwrap_err(); + + assert_eq!(error_response.status(), Some(StatusCode::ACCEPTED)); +} + +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn gossip_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_invalid() { + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
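These tests exercise the endpoint through `post_beacon_blocks_v2`, which encodes the validation level as a `broadcast_validation` query parameter; when the parameter is omitted, the server falls back to `gossip`. A self-contained sketch of that encode/decode round-trip, assuming the `serde` and `serde_urlencoded` crates (the enum mirrors the one added to `common/eth2`, but this is not the crate's actual wire-up):

```rust
use serde::{Deserialize, Serialize};

#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
enum BroadcastValidation {
    #[default]
    Gossip,
    Consensus,
    ConsensusAndEquivocation,
}

#[derive(Default, Serialize, Deserialize)]
struct BroadcastValidationQuery {
    #[serde(default)]
    broadcast_validation: BroadcastValidation,
}

fn main() {
    // What the client appends to `POST /eth/v2/beacon/blocks`.
    let q = serde_urlencoded::to_string(BroadcastValidationQuery {
        broadcast_validation: BroadcastValidation::ConsensusAndEquivocation,
    })
    .unwrap();
    assert_eq!(q, "broadcast_validation=consensus_and_equivocation");

    // What the server-side query filter recovers.
    let parsed: BroadcastValidationQuery =
        serde_urlencoded::from_str("broadcast_validation=consensus").unwrap();
    assert_eq!(parsed.broadcast_validation, BroadcastValidation::Consensus);

    // An absent parameter falls back to the default `gossip` level.
    let parsed: BroadcastValidationQuery = serde_urlencoded::from_str("").unwrap();
    assert_eq!(parsed.broadcast_validation, BroadcastValidation::Gossip);
}
```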
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective, but nonetheless equivocates, is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_partial_pass_only_consensus() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + /* submit `block_b` which should induce equivocation */ + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_block( + None, + ProvenancedBlock::local(gossip_block_b.unwrap()), + tester.harness.chain.clone(), + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_b.canonical_root())); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn consensus_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. 
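The `ConsensusAndEquivocation` level hinges on `observed_block_producers.proposer_has_been_observed(..)` now returning a seen-status whose `is_slashable()` flags a second, distinct block for the same proposer and slot. A simplified model of that bookkeeping is sketched below with hypothetical standalone types; the real cache additionally handles finalization and validator-index bounds:

```rust
use std::collections::HashMap;

enum SeenStatus {
    NotSeen,
    Duplicate, // same proposer and slot, same root: a harmless re-publish
    Slashable, // same proposer and slot, different root: an equivocation
}

impl SeenStatus {
    fn is_slashable(&self) -> bool {
        matches!(self, SeenStatus::Slashable)
    }
}

#[derive(Default)]
struct ObservedProducers {
    // (proposer_index, slot) -> first block root observed
    seen: HashMap<(u64, u64), [u8; 32]>,
}

impl ObservedProducers {
    fn observe(&mut self, proposer: u64, slot: u64, root: [u8; 32]) -> SeenStatus {
        match self.seen.get(&(proposer, slot)) {
            None => {
                self.seen.insert((proposer, slot), root);
                SeenStatus::NotSeen
            }
            Some(first) if *first == root => SeenStatus::Duplicate,
            Some(_) => SeenStatus::Slashable,
        }
    }
}

fn main() {
    let mut producers = ObservedProducers::default();
    assert!(!producers.observe(7, 32, [0xaa; 32]).is_slashable());
    // Seeing the identical block again is a duplicate, not an equivocation.
    assert!(!producers.observe(7, 32, [0xaa; 32]).is_slashable());
    // A second, distinct block for the same (proposer, slot) is slashable.
    assert!(producers.observe(7, 32, [0xbb; 32]).is_slashable());
}
```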
+ let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_consensus_early_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + /* submit `block_a` as valid */ + assert!(tester + .client + .post_beacon_blocks_v2(&block_a, validation_level) + .await + .is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); + + /* submit `block_b` which should induce equivocation */ + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block_b, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. 
+/// +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_consensus_late_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let (block_b, state_after_b): (SignedBeaconBlock, _) = + tester.harness.make_block(state_a, slot_b).await; + + /* check for `make_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let gossip_block_b = GossipVerifiedBlock::new(block_b.clone().into(), &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_block( + None, + ProvenancedBlock::local(gossip_block_b.unwrap()), + tester.harness.chain, + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_err()); + + let publication_error = publication_result.unwrap_err(); + + assert!(publication_error.find::().is_some()); + + assert_eq!( + *publication_error.find::().unwrap().0, + "proposal for this slot and proposer has already been seen".to_string() + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective (and does not equivocate) is accepted when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn equivocation_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester.harness.make_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from a gossip perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_partial_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
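The blinded-route tests build a full block and convert it with `.into()`, which works because a blinded block is structurally the same block with the execution payload swapped for its header. A toy version of that conversion under invented stand-in types (the real conversion keeps the full payload header, not just the block hash):

```rust
// Stand-ins: a real payload vs. the header-only commitment in a blinded block.
#[allow(dead_code)]
#[derive(Clone)]
struct ExecutionPayload {
    transactions: Vec<Vec<u8>>,
    block_hash: [u8; 32],
}

struct PayloadHeader {
    block_hash: [u8; 32],
}

struct SignedBeaconBlock {
    slot: u64,
    payload: ExecutionPayload,
}

struct SignedBlindedBeaconBlock {
    slot: u64,
    payload_header: PayloadHeader,
}

impl From<SignedBeaconBlock> for SignedBlindedBeaconBlock {
    fn from(full: SignedBeaconBlock) -> Self {
        // Drop the transactions; keep only the commitment to the payload.
        Self {
            slot: full.slot,
            payload_header: PayloadHeader {
                block_hash: full.payload.block_hash,
            },
        }
    }
}

fn main() {
    let full = SignedBeaconBlock {
        slot: 32,
        payload: ExecutionPayload {
            transactions: vec![vec![0xde, 0xad]],
            block_hash: [0x11; 32],
        },
    };
    let blinded: SignedBlindedBeaconBlock = full.into();
    assert_eq!(blinded.slot, 32);
    assert_eq!(blinded.payload_header.block_hash, [0x11; 32]);
}
```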
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero() + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response = response.unwrap_err(); + + assert_eq!(error_response.status(), Some(StatusCode::ACCEPTED)); +} + +// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_gossip_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Gossip); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_consensus_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = Some(BroadcastValidation::Consensus); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. 
+ let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} + +/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_invalid() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. + tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let chain_state_before = tester.harness.get_current_state(); + let slot = chain_state_before.slot() + 1; + + tester.harness.advance_slot(); + + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(chain_state_before, slot, |b| { + *b.state_root_mut() = Hash256::zero(); + *b.parent_root_mut() = Hash256::zero(); + }) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_consensus_early_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + .harness + .make_blinded_block(state_a.clone(), slot_b) + .await; + let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + /* check for `make_blinded_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + /* submit `block_a` as valid */ + assert!(tester + .client + .post_beacon_blinded_blocks_v2(&block_a, validation_level) + .await + .is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block_a.canonical_root())); + + /* submit `block_b` which should induce equivocation */ + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&block_b, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) + ); +} + +/// This test checks that a block that is only valid from a gossip perspective is rejected when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_gossip() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBeaconBlock, _) = tester + .harness + .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) + .await; + + let blinded_block: SignedBlindedBeaconBlock = block.into(); + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) + .await; + assert!(response.is_err()); + + let error_response: eth2::Error = response.err().unwrap(); + + /* mandated by Beacon API spec */ + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + + assert!( + matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()) + ); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective but that equivocates **late** is rejected when using `broadcast_validation=consensus_and_equivocation`. +/// +/// This test is unique in that we can't actually test the HTTP API directly, but instead have to hook into the `publish_blocks` code manually. This is in order to handle the late equivocation case. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_consensus_late_equivocation() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + let test_logger = tester.harness.logger().clone(); + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block_a, state_after_a): (SignedBlindedBeaconBlock, _) = tester + .harness + .make_blinded_block(state_a.clone(), slot_b) + .await; + let (block_b, state_after_b): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + /* check for `make_blinded_block` curios */ + assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); + assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_ne!(block_a.state_root(), block_b.state_root()); + + let unblinded_block_a = reconstruct_block( + tester.harness.chain.clone(), + block_a.state_root(), + block_a, + test_logger.clone(), + ) + .await + .unwrap(); + let unblinded_block_b = reconstruct_block( + tester.harness.chain.clone(), + block_b.clone().state_root(), + block_b.clone(), + test_logger.clone(), + ) + .await + .unwrap(); + + let inner_block_a = match unblinded_block_a { + ProvenancedBlock::Local(a, _) => a, + ProvenancedBlock::Builder(a, _) => a, + }; + let inner_block_b = match unblinded_block_b { + ProvenancedBlock::Local(b, _) => b, + ProvenancedBlock::Builder(b, _) => b, + }; + + let gossip_block_b = GossipVerifiedBlock::new(inner_block_b, &tester.harness.chain); + assert!(gossip_block_b.is_ok()); + let gossip_block_a = GossipVerifiedBlock::new(inner_block_a, &tester.harness.chain); + assert!(gossip_block_a.is_err()); + + let channel = tokio::sync::mpsc::unbounded_channel(); + + let publication_result: Result<(), Rejection> = publish_blinded_block( + block_b, + tester.harness.chain, + &channel.0, + test_logger, + validation_level.unwrap(), + ) + .await; + + assert!(publication_result.is_err()); + + let publication_error: Rejection = publication_result.unwrap_err(); + + assert!(publication_error.find::().is_some()); +} + +/// This test checks that a block that is valid from both a gossip and consensus perspective (and does not equivocate) is accepted when using `broadcast_validation=consensus_and_equivocation`. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn blinded_equivocation_full_pass() { + /* this test targets gossip-level validation */ + let validation_level: Option = + Some(BroadcastValidation::ConsensusAndEquivocation); + + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. + let validator_count = 64; + let num_initial: u64 = 31; + let tester = InteractiveTester::::new(None, validator_count).await; + + // Create some chain depth. 
+ tester.harness.advance_slot(); + tester + .harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + tester.harness.advance_slot(); + + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + + let state_a = tester.harness.get_current_state(); + let (block, _): (SignedBlindedBeaconBlock, _) = + tester.harness.make_blinded_block(state_a, slot_b).await; + + let response: Result<(), eth2::Error> = tester + .client + .post_beacon_blocks_v2(&block, validation_level) + .await; + + assert!(response.is_ok()); + assert!(tester + .harness + .chain + .block_is_known_to_fork_choice(&block.canonical_root())); +} diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index f5916d8506a..e0636424e48 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,5 +1,6 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. +pub mod broadcast_validation_tests; pub mod fork_tests; pub mod interactive_tests; pub mod status_tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index a6c49ddaeef..bcd192c699e 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -159,7 +159,7 @@ impl ApiTester { // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness - .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; let head_state_root = head.beacon_state_root(); @@ -1248,14 +1248,23 @@ impl ApiTester { } pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { - let mut next_block = self.next_block.clone(); - *next_block.message_mut().proposer_index_mut() += 1; + let block = self + .harness + .make_block_with_modifier( + self.harness.get_current_state(), + self.harness.get_current_slot(), + |b| { + *b.state_root_mut() = Hash256::zero(); + }, + ) + .await + .0; - assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); + assert!(self.client.post_beacon_blocks(&block).await.is_err()); assert!( self.network_rx.network_recv.recv().await.is_some(), - "invalid blocks should be sent to network" + "gossip valid blocks should be sent to network" ); self @@ -4126,7 +4135,7 @@ impl ApiTester { .unwrap(); let expected_reorg = EventKind::ChainReorg(SseChainReorg { - slot: self.next_block.slot(), + slot: self.reorg_block.slot(), depth: 1, old_head_block: self.next_block.canonical_root(), old_head_state: self.next_block.state_root(), @@ -4136,6 +4145,8 @@ impl ApiTester { execution_optimistic: false, }); + self.harness.advance_slot(); + self.client .post_beacon_blocks(&self.reorg_block) .await diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 185634c3081..91ec81b18d3 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -785,6 +785,20 @@ impl Worker { verified_block } + Err(e @ BlockError::Slashable) => { + warn!( + self.log, + "Received equivocating block from peer"; + "error" => ?e + ); + /* punish peer for submitting an equivocation, but not too harshly as honest peers may conceivably forward equivocating blocks to us from time to time */ + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + 
"gossip_block_mid", + ); + return None; + } Err(BlockError::ParentUnknown(block)) => { debug!( self.log, @@ -806,7 +820,6 @@ impl Worker { Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::BlockIsAlreadyKnown) - | Err(e @ BlockError::RepeatProposal { .. }) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); @@ -948,7 +961,12 @@ impl Worker { let result = self .chain - .process_block(block_root, verified_block, NotifyExecutionLayer::Yes) + .process_block( + block_root, + verified_block, + NotifyExecutionLayer::Yes, + || Ok(()), + ) .await; match &result { diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 7e8fce35632..ac59b1daa93 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -98,33 +98,21 @@ impl Worker { }); // Checks if a block from this proposer is already known. - let proposal_already_known = || { + let block_equivocates = || { match self .chain .observed_block_producers .read() - .proposer_has_been_observed(block.message()) + .proposer_has_been_observed(block.message(), block.canonical_root()) { - Ok(is_observed) => is_observed, - // Both of these blocks will be rejected, so reject them now rather + Ok(seen_status) => seen_status.is_slashable(), + //Both of these blocks will be rejected, so reject them now rather // than re-queuing them. Err(ObserveError::FinalizedBlock { .. }) | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false, } }; - // Returns `true` if the block is already known to fork choice. Notably, - // this will return `false` for blocks that we've already imported but - // ancestors of the finalized checkpoint. That should not be an issue - // for our use here since finalized blocks will always be late and won't - // be requeued anyway. - let block_is_already_known = || { - self.chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - }; - // If we've already seen a block from this proposer *and* the block // arrived before the attestation deadline, requeue it to ensure it is // imported late enough that it won't receive a proposer boost. @@ -132,7 +120,7 @@ impl Worker { // Don't requeue blocks if they're already known to fork choice, just // push them through to block processing so they can be handled through // the normal channels. - if !block_is_late && proposal_already_known() && !block_is_already_known() { + if !block_is_late && block_equivocates() { debug!( self.log, "Delaying processing of duplicate RPC block"; @@ -165,7 +153,7 @@ impl Worker { let parent_root = block.message().parent_root(); let result = self .chain - .process_block(block_root, block, NotifyExecutionLayer::Yes) + .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(())) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 217d3569689..e34916bebab 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -322,6 +322,26 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function supporting arbitrary responses and timeouts. 
+    async fn post_generic_with_consensus_version<T: Serialize, U: IntoUrl>(
+        &self,
+        url: U,
+        body: &T,
+        timeout: Option<Duration>,
+        fork: ForkName,
+    ) -> Result<Response, Error> {
+        let mut builder = self.client.post(url);
+        if let Some(timeout) = timeout {
+            builder = builder.timeout(timeout);
+        }
+        let response = builder
+            .header(CONSENSUS_VERSION_HEADER, fork.to_string())
+            .json(body)
+            .send()
+            .await?;
+        ok_or_error(response).await
+    }
+
     /// `GET beacon/genesis`
     ///
     /// ## Errors
@@ -654,6 +674,76 @@ impl BeaconNodeHttpClient {
         Ok(())
     }
 
+    pub fn post_beacon_blocks_v2_path(
+        &self,
+        validation_level: Option<BroadcastValidation>,
+    ) -> Result<Url, Error> {
+        let mut path = self.eth_path(V2)?;
+        path.path_segments_mut()
+            .map_err(|_| Error::InvalidUrl(self.server.clone()))?
+            .extend(&["beacon", "blocks"]);
+
+        path.set_query(
+            validation_level
+                .map(|v| format!("broadcast_validation={}", v))
+                .as_deref(),
+        );
+
+        Ok(path)
+    }
+
+    pub fn post_beacon_blinded_blocks_v2_path(
+        &self,
+        validation_level: Option<BroadcastValidation>,
+    ) -> Result<Url, Error> {
+        let mut path = self.eth_path(V2)?;
+        path.path_segments_mut()
+            .map_err(|_| Error::InvalidUrl(self.server.clone()))?
+            .extend(&["beacon", "blinded_blocks"]);
+
+        path.set_query(
+            validation_level
+                .map(|v| format!("broadcast_validation={}", v))
+                .as_deref(),
+        );
+
+        Ok(path)
+    }
+
+    /// `POST v2/beacon/blocks`
+    pub async fn post_beacon_blocks_v2<T: EthSpec, Payload: AbstractExecPayload<T>>(
+        &self,
+        block: &SignedBeaconBlock<T, Payload>,
+        validation_level: Option<BroadcastValidation>,
+    ) -> Result<(), Error> {
+        self.post_generic_with_consensus_version(
+            self.post_beacon_blocks_v2_path(validation_level)?,
+            block,
+            Some(self.timeouts.proposal),
+            block.message().body().fork_name(),
+        )
+        .await?;
+
+        Ok(())
+    }
+
+    /// `POST v2/beacon/blinded_blocks`
+    pub async fn post_beacon_blinded_blocks_v2<T: EthSpec>(
+        &self,
+        block: &SignedBlindedBeaconBlock<T>,
+        validation_level: Option<BroadcastValidation>,
+    ) -> Result<(), Error> {
+        self.post_generic_with_consensus_version(
+            self.post_beacon_blinded_blocks_v2_path(validation_level)?,
+            block,
+            Some(self.timeouts.proposal),
+            block.message().body().fork_name(),
+        )
+        .await?;
+
+        Ok(())
+    }
+
     /// Path for `v2/beacon/blocks`
     pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
         let mut path = self.eth_path(V2)?;
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index 55759a2e158..5f2e1ada7be 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -6,7 +6,7 @@ use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStat
 use mediatype::{names, MediaType, MediaTypeList};
 use serde::{Deserialize, Serialize};
 use std::convert::TryFrom;
-use std::fmt;
+use std::fmt::{self, Display};
 use std::str::{from_utf8, FromStr};
 use std::time::Duration;
 pub use types::*;
@@ -1260,6 +1260,50 @@ pub struct ForkChoiceNode {
     pub execution_block_hash: Option<ExecutionBlockHash>,
 }
 
+#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum BroadcastValidation {
+    Gossip,
+    Consensus,
+    ConsensusAndEquivocation,
+}
+
+impl Default for BroadcastValidation {
+    fn default() -> Self {
+        Self::Gossip
+    }
+}
+
+impl Display for BroadcastValidation {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Gossip => write!(f, "gossip"),
+            Self::Consensus => write!(f, "consensus"),
+            Self::ConsensusAndEquivocation => write!(f, "consensus_and_equivocation"),
+        }
+    }
+}
+
+impl FromStr for BroadcastValidation {
+    type Err = &'static str;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "gossip" => Ok(Self::Gossip),
+            "consensus" => Ok(Self::Consensus),
+            "consensus_and_equivocation" =>
Ok(Self::ConsensusAndEquivocation), + _ => Err("Invalid broadcast validation level"), + } + } +} + +#[derive(Default, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub struct BroadcastValidationQuery { + #[serde(default)] + pub broadcast_validation: BroadcastValidation, +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index c0ba8694100..dce1be742ff 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -89,7 +89,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, } } -impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { +impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { match self { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index e0f4043ac21..65528de1757 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -382,6 +382,7 @@ impl Tester { block_root, block.clone(), NotifyExecutionLayer::Yes, + || Ok(()), ))?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( From edd093293a34b68e5fc32382f7fa0c374f8543ed Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 30 Jun 2023 01:13:03 +0000 Subject: [PATCH 13/15] added debounce to log (#4269) ## Issue Addressed [#4259](https://github.com/sigp/lighthouse/issues/4259) ## Proposed Changes debounce spammy `Unable to send message to the beacon processor` log messages ## Additional Info We could potentially debounce other logs that have the potential to be "spammy". After some feedback we decided to additionally add the following change: create a newtype wrapper around `mpsc::Sender>`. When there is an error on the try_send method on the wrapper, we increase a counter metric with one label per work type. --- .../network/src/beacon_processor/mod.rs | 18 +++++++++++++++ beacon_node/network/src/metrics.rs | 6 +++++ beacon_node/network/src/router.rs | 22 +++++++++++++------ .../network/src/sync/block_lookups/tests.rs | 3 ++- beacon_node/network/src/sync/manager.rs | 4 ++-- .../network/src/sync/network_context.rs | 10 ++++----- .../network/src/sync/range_sync/range.rs | 4 ++-- 7 files changed, 50 insertions(+), 17 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 26d2c19b519..84d8e1b07a8 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -750,6 +750,24 @@ impl std::convert::From> for WorkEvent { } } +pub struct BeaconProcessorSend(pub mpsc::Sender>); + +impl BeaconProcessorSend { + pub fn try_send(&self, message: WorkEvent) -> Result<(), Box>>> { + let work_type = message.work_type(); + match self.0.try_send(message) { + Ok(res) => Ok(res), + Err(e) => { + metrics::inc_counter_vec( + &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, + &[work_type], + ); + Err(Box::new(e)) + } + } + } +} + /// A consensus message (or multiple) from the network that requires processing. #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 09caaaa11e3..27d7dc9625d 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -279,6 +279,12 @@ lazy_static! 
{ "Gossipsub light_client_optimistic_update errors per error type", &["type"] ); + pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result = + try_create_int_counter_vec( + "beacon_processor_send_error_per_work_type", + "Total number of beacon processor send error per work type", + &["type"] + ); /* diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 1b0f1fb41e1..7a91f2d0b1a 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -6,7 +6,8 @@ #![allow(clippy::unit_arg)] use crate::beacon_processor::{ - BeaconProcessor, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, + BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, + MAX_WORK_EVENT_QUEUE_LEN, }; use crate::error; use crate::service::{NetworkMessage, RequestId}; @@ -19,6 +20,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{ MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; +use logging::TimeLatch; use slog::{debug, o, trace}; use slog::{error, warn}; use std::cmp; @@ -39,9 +41,11 @@ pub struct Router { /// A network context to return and handle RPC requests. network: HandlerNetworkContext, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, + beacon_processor_send: BeaconProcessorSend, /// The `Router` logger. log: slog::Logger, + /// Provides de-bounce functionality for logging. + logger_debounce: TimeLatch, } /// Types of messages the router can receive. @@ -100,7 +104,7 @@ impl Router { beacon_chain.clone(), network_globals.clone(), network_send.clone(), - beacon_processor_send.clone(), + BeaconProcessorSend(beacon_processor_send.clone()), sync_logger, ); @@ -124,8 +128,9 @@ impl Router { chain: beacon_chain, sync_send, network: HandlerNetworkContext::new(network_send, log.clone()), - beacon_processor_send, + beacon_processor_send: BeaconProcessorSend(beacon_processor_send), log: message_handler_log, + logger_debounce: TimeLatch::default(), }; // spawn handler task and move the message handler instance into the spawned thread @@ -479,12 +484,15 @@ impl Router { self.beacon_processor_send .try_send(work) .unwrap_or_else(|e| { - let work_type = match &e { + let work_type = match &*e { mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => work.work_type(), }; - error!(&self.log, "Unable to send message to the beacon processor"; - "error" => %e, "type" => work_type) + + if self.logger_debounce.elapsed() { + error!(&self.log, "Unable to send message to the beacon processor"; + "error" => %e, "type" => work_type) + } }) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 5a70944f6cb..82334db0f8e 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use crate::beacon_processor::BeaconProcessorSend; use crate::service::RequestId; use crate::sync::manager::RequestId as SyncId; use crate::NetworkMessage; @@ -54,7 +55,7 @@ impl TestRig { SyncNetworkContext::new( network_tx, globals, - beacon_processor_tx, + BeaconProcessorSend(beacon_processor_tx), log.new(slog::o!("component" => "network_context")), ) }; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 37b63cdba78..c24d4c192b1 100644 --- 
a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -38,7 +38,7 @@ use super::block_lookups::BlockLookups;
 use super::network_context::SyncNetworkContext;
 use super::peer_sync_info::{remote_sync_type, PeerSyncType};
 use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
-use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
+use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId};
 use crate::service::NetworkMessage;
 use crate::status::ToStatusMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
@@ -188,7 +188,7 @@ pub fn spawn<T: BeaconChainTypes>(
     beacon_chain: Arc<BeaconChain<T>>,
     network_globals: Arc<NetworkGlobals<T::EthSpec>>,
     network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
-    beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>,
+    beacon_processor_send: BeaconProcessorSend<T>,
     log: slog::Logger,
 ) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> {
     assert!(
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs
index 23d42002f47..03c466eecea 100644
--- a/beacon_node/network/src/sync/network_context.rs
+++ b/beacon_node/network/src/sync/network_context.rs
@@ -3,7 +3,7 @@
 
 use super::manager::{Id, RequestId as SyncRequestId};
 use super::range_sync::{BatchId, ChainId};
-use crate::beacon_processor::WorkEvent;
+use crate::beacon_processor::BeaconProcessorSend;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
 use beacon_chain::{BeaconChainTypes, EngineState};
@@ -37,7 +37,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     execution_engine_state: EngineState,
 
     /// Channel to send work to the beacon processor.
-    beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
+    beacon_processor_send: BeaconProcessorSend<T>,
 
     /// Logger for the `SyncNetworkContext`.
     log: slog::Logger,
@@ -47,7 +47,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
     pub fn new(
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
-        beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
+        beacon_processor_send: BeaconProcessorSend<T>,
         log: slog::Logger,
     ) -> Self {
         Self {
@@ -278,12 +278,12 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         })
     }
 
-    pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender<WorkEvent<T>>> {
+    pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend<T>> {
         self.is_execution_engine_online()
             .then_some(&self.beacon_processor_send)
     }
 
-    pub fn processor_channel(&self) -> &mpsc::Sender<WorkEvent<T>> {
+    pub fn processor_channel(&self) -> &BeaconProcessorSend<T> {
         &self.beacon_processor_send
     }
 
diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs
index 0f1c00e509f..2c35c57d9e4 100644
--- a/beacon_node/network/src/sync/range_sync/range.rs
+++ b/beacon_node/network/src/sync/range_sync/range.rs
@@ -375,7 +375,7 @@ mod tests {
     use crate::NetworkMessage;
 
     use super::*;
-    use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
+    use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent};
     use beacon_chain::builder::Witness;
     use beacon_chain::eth1_chain::CachingEth1Backend;
     use beacon_chain::parking_lot::RwLock;
@@ -603,7 +603,7 @@ mod tests {
         let cx = SyncNetworkContext::new(
             network_tx,
             globals.clone(),
-            beacon_processor_tx,
+            BeaconProcessorSend(beacon_processor_tx),
             log.new(o!("component" => "network_context")),
         );
         let test_rig = TestRig {

From 826e090f50da93a0cca297bc4e57a4300f0db9f2 Mon Sep 17 00:00:00 2001
From: Eitan Seri-Levi
Date: Fri, 30 Jun 2023 01:13:04 +0000
Subject: [PATCH 14/15] Update node health endpoint (#4310)

## Issue Addressed

[#4292](https://github.com/sigp/lighthouse/issues/4292)

## Proposed Changes

Updated the node health endpoint so that it:

- will return a 200 status code if `!syncing && !el_offline && !optimistic`
- will return a 206 if `(syncing || optimistic) && !el_offline`
- will return a 503 if `el_offline`
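As a quick illustration of that decision order (a hypothetical free function, not code from this patch; the real logic lives in the `warp` handler in the diff below, and checks `el_offline` first):

    fn health_status(el_offline: bool, is_syncing: bool, is_optimistic: bool) -> u16 {
        if el_offline {
            503 // SERVICE_UNAVAILABLE: no usable execution layer
        } else if is_syncing || is_optimistic {
            206 // PARTIAL_CONTENT: reachable, but not fully synced and verified
        } else {
            200 // OK
        }
    }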
## Additional Info

---
 beacon_node/http_api/src/lib.rs            | 53 +++++++++-----
 beacon_node/http_api/tests/status_tests.rs | 80 ++++++++++++++++++++++
 beacon_node/http_api/tests/tests.rs        | 14 ++--
 3 files changed, 125 insertions(+), 22 deletions(-)

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 93bfe524bc2..27bcc4d8a13 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -2418,24 +2418,41 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path("health"))
         .and(warp::path::end())
         .and(network_globals.clone())
-        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
-            blocking_response_task(move || match *network_globals.sync_state.read() {
-                SyncState::SyncingFinalized { .. }
-                | SyncState::SyncingHead { .. }
-                | SyncState::SyncTransition
-                | SyncState::BackFillSyncing { .. } => Ok(warp::reply::with_status(
-                    warp::reply(),
-                    warp::http::StatusCode::PARTIAL_CONTENT,
-                )),
-                SyncState::Synced => Ok(warp::reply::with_status(
-                    warp::reply(),
-                    warp::http::StatusCode::OK,
-                )),
-                SyncState::Stalled => Err(warp_utils::reject::not_synced(
-                    "sync stalled, beacon chain may not yet be initialized.".to_string(),
-                )),
-            })
-        });
+        .and(chain_filter.clone())
+        .and_then(
+            |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
+                async move {
+                    let el_offline = if let Some(el) = &chain.execution_layer {
+                        el.is_offline_or_erroring().await
+                    } else {
+                        true
+                    };
+
+                    blocking_response_task(move || {
+                        let is_optimistic = chain
+                            .is_optimistic_or_invalid_head()
+                            .map_err(warp_utils::reject::beacon_chain_error)?;
+
+                        let is_syncing = !network_globals.sync_state.read().is_synced();
+
+                        if el_offline {
+                            Err(warp_utils::reject::not_synced("execution layer is offline".to_string()))
+                        } else if is_syncing || is_optimistic {
+                            Ok(warp::reply::with_status(
+                                warp::reply(),
+                                warp::http::StatusCode::PARTIAL_CONTENT,
+                            ))
+                        } else {
+                            Ok(warp::reply::with_status(
+                                warp::reply(),
+                                warp::http::StatusCode::OK,
+                            ))
+                        }
+                    })
+                    .await
+                }
+            },
+        );
 
     // GET node/peers/{peer_id}
     let get_node_peers_by_id = eth_v1
diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs
index ce725b75a9a..95f885faa56 100644
--- a/beacon_node/http_api/tests/status_tests.rs
+++ b/beacon_node/http_api/tests/status_tests.rs
@@ -3,6 +3,7 @@ use beacon_chain::{
     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
     BlockError,
 };
+use eth2::StatusCode;
 use execution_layer::{PayloadStatusV1, PayloadStatusV1Status};
 use http_api::test_utils::InteractiveTester;
 use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot};
@@ -143,3 +144,82 @@ async fn el_error_on_new_payload() {
     assert_eq!(api_response.is_optimistic, Some(false));
     assert_eq!(api_response.is_syncing, false);
 }
+
+/// Check `node health` endpoint when the EL is offline.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_offline() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL offline + mock_el.server.set_syncing_response(Err("offline".into())); + mock_el.el.upcheck().await; + + let status = tester.client.get_node_health().await; + match status { + Ok(_) => { + panic!("should return 503 error status code"); + } + Err(e) => { + assert_eq!(e.status().unwrap(), 503); + } + } +} + +/// Check `node health` endpoint when the EL is online and synced. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_online_and_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL synced + mock_el.server.set_syncing_response(Ok(false)); + mock_el.el.upcheck().await; + + let status = tester.client.get_node_health().await; + match status { + Ok(response) => { + assert_eq!(response, StatusCode::OK); + } + Err(_) => { + panic!("should return 200 status code"); + } + } +} + +/// Check `node health` endpoint when the EL is online but not synced. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn node_health_el_online_and_not_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL not synced + harness.advance_slot(); + mock_el.server.all_payloads_syncing(true); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let status = tester.client.get_node_health().await; + match status { + Ok(response) => { + assert_eq!(response, StatusCode::PARTIAL_CONTENT); + } + Err(_) => { + panic!("should return 206 status code"); + } + } +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index bcd192c699e..741ee1ffc06 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -8,7 +8,7 @@ use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, - BeaconNodeHttpClient, Error, StatusCode, Timeouts, + BeaconNodeHttpClient, Error, Timeouts, }; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; @@ -1762,9 +1762,15 @@ impl ApiTester { } pub async fn test_get_node_health(self) -> Self { - let status = self.client.get_node_health().await.unwrap(); - assert_eq!(status, StatusCode::OK); - + let status = self.client.get_node_health().await; + match status { + Ok(_) => { + panic!("should return 503 error status code"); + } + Err(e) => { + assert_eq!(e.status().unwrap(), 503); + } + } self } From 46be05f7280c3afdfbd614dbc8f6a9b3985ab821 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 30 Jun 2023 01:13:06 +0000 Subject: [PATCH 15/15] Cache target attester balances for unrealized FFG progression calculation (#4362) ## Issue Addressed #4118 ## Proposed Changes 
This PR introduces a "progressive balances" cache on the `BeaconState`, which keeps track of the accumulated target attestation balance for the current & previous epochs. The cached values are utilised by fork choice to calculate unrealized justification and finalization (instead of converting epoch participation arrays to balances for each block we receive). This optimization will be rolled out gradually to allow for more testing. A new `--progressive-balances disabled|checked|strict|fast` flag is introduced to support this: - `checked`: enabled with checks against participation cache, and falls back to the existing epoch processing calculation if there is a total target attester balance mismatch. There is no performance gain from this as the participation cache still needs to be computed. **This is the default mode for now.** - `strict`: enabled with checks against participation cache, returns error if there is a mismatch. **Used for testing only**. - `fast`: enabled with no comparative checks and without computing the participation cache. This mode gives us the performance gains from the optimization. This is still experimental and not currently recommended for production usage, but will become the default mode in a future release. - `disabled`: disable the usage of progressive cache, and use the existing method for FFG progression calculation. This mode may be useful if we find a bug and want to stop the frequent error logs. ### Tasks - [x] Initial cache implementation in `BeaconState` - [x] Perform checks in fork choice to compare the progressive balances cache against results from `ParticipationCache` - [x] Add CLI flag, and disable the optimization by default - [x] Testing on Goerli & Benchmarking - [x] Move caching logic from state processing to the `ProgressiveBalancesCache` (see [this comment](https://github.com/sigp/lighthouse/pull/4362#discussion_r1230877001)) - [x] Add attesting balance metrics Co-authored-by: Jimmy Chen --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 2 + beacon_node/beacon_chain/src/builder.rs | 8 +- beacon_node/beacon_chain/src/chain_config.rs | 5 +- beacon_node/beacon_chain/src/fork_revert.rs | 9 +- beacon_node/beacon_chain/src/test_utils.rs | 38 +++- beacon_node/beacon_chain/tests/capella.rs | 18 +- .../tests/payload_invalidation.rs | 3 +- beacon_node/http_api/src/block_rewards.rs | 2 +- beacon_node/src/cli.rs | 14 ++ beacon_node/src/config.rs | 6 + beacon_node/store/src/partial_beacon_state.rs | 1 + beacon_node/store/src/reconstruct.rs | 2 +- consensus/fork_choice/src/fork_choice.rs | 209 +++++++++++++++--- consensus/fork_choice/tests/tests.rs | 106 ++++++++- consensus/state_processing/src/common/mod.rs | 1 + .../src/common/slash_validator.rs | 3 + .../update_progressive_balances_cache.rs | 142 ++++++++++++ consensus/state_processing/src/genesis.rs | 4 +- consensus/state_processing/src/metrics.rs | 11 + .../src/per_block_processing.rs | 9 + .../src/per_block_processing/errors.rs | 9 + .../process_operations.rs | 11 + .../src/per_epoch_processing/altair.rs | 8 +- .../altair/participation_cache.rs | 54 ++--- .../src/per_epoch_processing/base.rs | 2 +- .../src/per_epoch_processing/capella.rs | 8 +- .../effective_balance_updates.rs | 41 +++- .../src/per_epoch_processing/slashings.rs | 2 +- .../state_processing/src/upgrade/altair.rs | 4 + .../state_processing/src/upgrade/capella.rs | 1 + .../state_processing/src/upgrade/merge.rs | 1 + consensus/types/Cargo.toml | 1 + consensus/types/benches/benches.rs | 2 +- 
consensus/types/src/beacon_state.rs | 55 ++++- consensus/types/src/beacon_state/balance.rs | 33 +++ .../types/src/beacon_state/clone_config.rs | 2 + .../progressive_balances_cache.rs | 184 +++++++++++++++ consensus/types/src/beacon_state/tests.rs | 5 +- lcli/src/new_testnet.rs | 2 +- lcli/src/skip_slots.rs | 2 +- lcli/src/transition_blocks.rs | 6 +- lighthouse/tests/beacon_node.rs | 30 ++- .../ef_tests/src/cases/epoch_processing.rs | 4 +- testing/ef_tests/src/cases/fork_choice.rs | 5 +- testing/ef_tests/src/cases/operations.rs | 4 + testing/ef_tests/src/cases/sanity_blocks.rs | 2 +- testing/ef_tests/src/cases/sanity_slots.rs | 2 +- 48 files changed, 953 insertions(+), 121 deletions(-) create mode 100644 consensus/state_processing/src/common/update_progressive_balances_cache.rs create mode 100644 consensus/types/src/beacon_state/balance.rs create mode 100644 consensus/types/src/beacon_state/progressive_balances_cache.rs diff --git a/Cargo.lock b/Cargo.lock index 02922b2d7e4..efc6a5d6abe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8935,6 +8935,7 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", + "strum", "superstruct 0.6.0", "swap_or_not_shuffle", "tempfile", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 772e4c1529e..01343ff3b15 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2898,7 +2898,9 @@ impl BeaconChain { block_delay, &state, payload_verification_status, + self.config.progressive_balances_mode, &self.spec, + &self.log, ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 9bb39396326..044391c415e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -338,7 +338,7 @@ where let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; beacon_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; let beacon_state_root = beacon_block.message().state_root(); @@ -437,7 +437,7 @@ where // Prime all caches before storing the state in the database and computing the tree hash // root. 
weak_subj_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; let computed_state_root = weak_subj_state @@ -687,6 +687,8 @@ where store.clone(), Some(current_slot), &self.spec, + self.chain_config.progressive_balances_mode, + &log, )?; } @@ -700,7 +702,7 @@ where head_snapshot .beacon_state - .build_all_caches(&self.spec) + .build_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; // Perform a check to ensure that the finalization points of the head and fork choice are diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 34a5c9a4ec9..cc7a957ecc8 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,7 +1,7 @@ pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde_derive::{Deserialize, Serialize}; use std::time::Duration; -use types::{Checkpoint, Epoch}; +use types::{Checkpoint, Epoch, ProgressiveBalancesMode}; pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); @@ -81,6 +81,8 @@ pub struct ChainConfig { pub always_prepare_payload: bool, /// Whether backfill sync processing should be rate-limited. pub enable_backfill_rate_limiting: bool, + /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation. + pub progressive_balances_mode: ProgressiveBalancesMode, } impl Default for ChainConfig { @@ -111,6 +113,7 @@ impl Default for ChainConfig { genesis_backfill: false, always_prepare_payload: false, enable_backfill_rate_limiting: true, + progressive_balances_mode: ProgressiveBalancesMode::Checked, } } } diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 084ae95e096..dc0e34277c9 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -10,7 +10,10 @@ use state_processing::{ use std::sync::Arc; use std::time::Duration; use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; -use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock, + Slot, +}; const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \ consider deleting it by running with the --purge-db flag."; @@ -100,6 +103,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It store: Arc>, current_slot: Option, spec: &ChainSpec, + progressive_balances_mode: ProgressiveBalancesMode, + log: &Logger, ) -> Result, E>, String> { // Fetch finalized block. 
    let finalized_checkpoint = head_state.finalized_checkpoint();
@@ -197,7 +202,9 @@ pub fn reset_fork_choice_to_finalization<E, Hot: ItemStore<E>, Cold: It
             Duration::from_secs(0),
             &state,
             payload_verification_status,
+            progressive_balances_mode,
             spec,
+            log,
         )
         .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
     }
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index 21f7248cee5..6520c9ba9c8 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -754,9 +754,7 @@ where
         complete_state_advance(&mut state, None, slot, &self.spec)
             .expect("should be able to advance state to slot");
 
-        state
-            .build_all_caches(&self.spec)
-            .expect("should build caches");
+        state.build_caches(&self.spec).expect("should build caches");
 
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@@ -803,9 +801,7 @@ where
         complete_state_advance(&mut state, None, slot, &self.spec)
             .expect("should be able to advance state to slot");
 
-        state
-            .build_all_caches(&self.spec)
-            .expect("should build caches");
+        state.build_caches(&self.spec).expect("should build caches");
 
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@@ -1523,6 +1519,36 @@ where
             .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
     }
 
+    pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> {
+        let proposer_slashing = self.make_proposer_slashing(validator_index);
+        if let ObservationOutcome::New(verified_proposer_slashing) = self
+            .chain
+            .verify_proposer_slashing_for_gossip(proposer_slashing)
+            .expect("should verify proposer slashing for gossip")
+        {
+            self.chain
+                .import_proposer_slashing(verified_proposer_slashing);
+            Ok(())
+        } else {
+            Err("should observe new proposer slashing".to_string())
+        }
+    }
+
+    pub fn add_attester_slashing(&self, validator_indices: Vec<u64>) -> Result<(), String> {
+        let attester_slashing = self.make_attester_slashing(validator_indices);
+        if let ObservationOutcome::New(verified_attester_slashing) = self
+            .chain
+            .verify_attester_slashing_for_gossip(attester_slashing)
+            .expect("should verify attester slashing for gossip")
+        {
+            self.chain
+                .import_attester_slashing(verified_attester_slashing);
+            Ok(())
+        } else {
+            Err("should observe new attester slashing".to_string())
+        }
+    }
+
     pub fn add_bls_to_execution_change(
         &self,
         validator_index: u64,
diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs
index e910e8134f1..f0b799ec9f7 100644
--- a/beacon_node/beacon_chain/tests/capella.rs
+++ b/beacon_node/beacon_chain/tests/capella.rs
@@ -133,13 +133,8 @@ async fn base_altair_merge_capella() {
     for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
         harness.extend_slots(1).await;
         let block = &harness.chain.head_snapshot().beacon_block;
-        let full_payload: FullPayload<E> = block
-            .message()
-            .body()
-            .execution_payload()
-            .unwrap()
-            .clone()
-            .into();
+        let full_payload: FullPayload<E> =
+            block.message().body().execution_payload().unwrap().into();
         // pre-capella shouldn't have withdrawals
         assert!(full_payload.withdrawals_root().is_err());
         execution_payloads.push(full_payload);
@@ -151,13 +146,8 @@ async fn base_altair_merge_capella() {
     for _ in 0..16 {
         harness.extend_slots(1).await;
         let block = &harness.chain.head_snapshot().beacon_block;
-        let full_payload: FullPayload<E> = block
-            .message()
-            .body()
-            .execution_payload()
-            .unwrap()
-            .clone()
-            .into();
+        let full_payload:
FullPayload = + block.message().body().execution_payload().unwrap().into(); // post-capella should have withdrawals assert!(full_payload.withdrawals_root().is_ok()); execution_payloads.push(full_payload); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 018defd2f00..9a8c324d09f 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1064,8 +1064,9 @@ async fn invalid_parent() { Duration::from_secs(0), &state, PayloadVerificationStatus::Optimistic, + rig.harness.chain.config.progressive_balances_mode, &rig.harness.chain.spec, - + rig.harness.logger() ), Err(ForkChoiceError::ProtoArrayStringError(message)) if message.contains(&format!( diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 828be8e5760..299bc019c40 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -49,7 +49,7 @@ pub fn get_block_rewards( .map_err(beacon_chain_error)?; state - .build_all_caches(&chain.spec) + .build_caches(&chain.spec) .map_err(beacon_state_error)?; let mut reward_cache = Default::default(); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 206cd3c72f3..646356b6cb4 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,5 +1,6 @@ use clap::{App, Arg}; use strum::VariantNames; +use types::ProgressiveBalancesMode; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("beacon_node") @@ -1117,4 +1118,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { developers. This directory is not pruned, users should be careful to avoid \ filling up their disks.") ) + .arg( + Arg::with_name("progressive-balances") + .long("progressive-balances") + .value_name("MODE") + .help("Options to enable or disable the progressive balances cache for \ + unrealized FFG progression calculation. The default `checked` mode compares \ + the progressive balances from the cache against results from the existing \ + method. If there is a mismatch, it falls back to the existing method. The \ + optimized mode (`fast`) is faster but is still experimental, and is \ + not recommended for mainnet usage at this time.") + .takes_value(true) + .possible_values(ProgressiveBalancesMode::VARIANTS) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c59b297c1b2..948c70dd41f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -800,6 +800,12 @@ pub fn get_config( client_config.network.invalid_block_storage = Some(path); } + if let Some(progressive_balances_mode) = + clap_utils::parse_optional(cli_args, "progressive-balances")? + { + client_config.chain.progressive_balances_mode = progressive_balances_mode; + } + Ok(client_config) } diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index cd923da40dc..9f2532d0a75 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -373,6 +373,7 @@ macro_rules! 
impl_try_into_beacon_state { // Caching total_active_balance: <_>::default(), + progressive_balances_cache: <_>::default(), committee_caches: <_>::default(), pubkey_cache: <_>::default(), exit_cache: <_>::default(), diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index cd50babdb0c..bac5d3cc823 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -63,7 +63,7 @@ where .load_cold_state_by_slot(lower_limit_slot)? .ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?; - state.build_all_caches(&self.spec)?; + state.build_caches(&self.spec)?; process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 5d86f99f1ab..e60774fc86e 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,10 +1,15 @@ use crate::{ForkChoiceStore, InvalidationOperation}; +use per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold, }; -use slog::{crit, debug, warn, Logger}; +use slog::{crit, debug, error, warn, Logger}; use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing::altair::ParticipationCache; +use state_processing::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, }; @@ -18,6 +23,7 @@ use types::{ EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; +use types::{ProgressiveBalancesCache, ProgressiveBalancesMode}; #[derive(Debug)] pub enum Error { @@ -72,7 +78,9 @@ pub enum Error { }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), ParticipationCacheBuild(BeaconStateError), + ParticipationCacheError(ParticipationCacheError), ValidatorStatuses(BeaconStateError), + ProgressiveBalancesCacheCheckFailed(String), } impl From for Error { @@ -93,6 +101,18 @@ impl From for Error { } } +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconStateError(e) + } +} + +impl From for Error { + fn from(e: ParticipationCacheError) -> Self { + Error::ParticipationCacheError(e) + } +} + #[derive(Debug, Clone, Copy)] /// Controls how fork choice should behave when restoring from a persisted fork choice. pub enum ResetPayloadStatuses { @@ -643,7 +663,9 @@ where block_delay: Duration, state: &BeaconState, payload_verification_status: PayloadVerificationStatus, + progressive_balances_mode: ProgressiveBalancesMode, spec: &ChainSpec, + log: &Logger, ) -> Result<(), Error> { // If this block has already been processed we do not need to reprocess it. 
// We check this immediately in case re-processing the block mutates some property of the @@ -737,43 +759,84 @@ where parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch }); - let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = - if let Some((parent_justified, parent_finalized)) = parent_checkpoints { - (parent_justified, parent_finalized) - } else { - let justification_and_finalization_state = match block { - BeaconBlockRef::Capella(_) - | BeaconBlockRef::Merge(_) - | BeaconBlockRef::Altair(_) => { - let participation_cache = - per_epoch_processing::altair::ParticipationCache::new(state, spec) - .map_err(Error::ParticipationCacheBuild)?; + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some(( + parent_justified, + parent_finalized, + )) = + parent_checkpoints + { + (parent_justified, parent_finalized) + } else { + let justification_and_finalization_state = match block { + BeaconBlockRef::Capella(_) + | BeaconBlockRef::Merge(_) + | BeaconBlockRef::Altair(_) => match progressive_balances_mode { + ProgressiveBalancesMode::Disabled => { + let participation_cache = ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild)?; per_epoch_processing::altair::process_justification_and_finalization( state, &participation_cache, )? } - BeaconBlockRef::Base(_) => { - let mut validator_statuses = - per_epoch_processing::base::ValidatorStatuses::new(state, spec) - .map_err(Error::ValidatorStatuses)?; - validator_statuses - .process_attestations(state) - .map_err(Error::ValidatorStatuses)?; - per_epoch_processing::base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - )? + ProgressiveBalancesMode::Fast + | ProgressiveBalancesMode::Checked + | ProgressiveBalancesMode::Strict => { + let maybe_participation_cache = progressive_balances_mode + .perform_comparative_checks() + .then(|| { + ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild) + }) + .transpose()?; + + process_justification_and_finalization_from_progressive_cache::( + state, + maybe_participation_cache.as_ref(), + ) + .or_else(|e| { + if progressive_balances_mode != ProgressiveBalancesMode::Strict { + error!( + log, + "Processing with progressive balances cache failed"; + "info" => "falling back to the non-optimized processing method", + "error" => ?e, + ); + let participation_cache = maybe_participation_cache + .map(Ok) + .unwrap_or_else(|| ParticipationCache::new(state, spec)) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + ).map_err(Error::from) + } else { + Err(e) + } + })? } - }; - - ( - justification_and_finalization_state.current_justified_checkpoint(), - justification_and_finalization_state.finalized_checkpoint(), - ) + }, + BeaconBlockRef::Base(_) => { + let mut validator_statuses = + per_epoch_processing::base::ValidatorStatuses::new(state, spec) + .map_err(Error::ValidatorStatuses)?; + validator_statuses + .process_attestations(state) + .map_err(Error::ValidatorStatuses)?; + per_epoch_processing::base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )? 
+ } }; + ( + justification_and_finalization_state.current_justified_checkpoint(), + justification_and_finalization_state.finalized_checkpoint(), + ) + }; + // Update best known unrealized justified & finalized checkpoints if unrealized_justified_checkpoint.epoch > self.fc_store.unrealized_justified_checkpoint().epoch @@ -1499,6 +1562,92 @@ where } } +/// Process justification and finalization using progressive cache. Also performs a comparative +/// check against the `ParticipationCache` if it is supplied. +/// +/// Returns an error if the cache is not initialized or if there is a mismatch on the comparative check. +fn process_justification_and_finalization_from_progressive_cache( + state: &BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, +) -> Result, Error> +where + E: EthSpec, + T: ForkChoiceStore, +{ + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= E::genesis_epoch() + 1 { + return Ok(justification_and_finalization_state); + } + + // Load cached balances + let progressive_balances_cache: &ProgressiveBalancesCache = state.progressive_balances_cache(); + let previous_target_balance = + progressive_balances_cache.previous_epoch_target_attesting_balance()?; + let current_target_balance = + progressive_balances_cache.current_epoch_target_attesting_balance()?; + let total_active_balance = state.get_total_active_balance()?; + + if let Some(participation_cache) = maybe_participation_cache { + check_progressive_balances::( + state, + participation_cache, + previous_target_balance, + current_target_balance, + total_active_balance, + )?; + } + + weigh_justification_and_finalization( + justification_and_finalization_state, + total_active_balance, + previous_target_balance, + current_target_balance, + ) + .map_err(Error::from) +} + +/// Perform comparative checks against `ParticipationCache`, will return error if there's a mismatch. 
+fn check_progressive_balances( + state: &BeaconState, + participation_cache: &ParticipationCache, + cached_previous_target_balance: u64, + cached_current_target_balance: u64, + cached_total_active_balance: u64, +) -> Result<(), Error> +where + E: EthSpec, + T: ForkChoiceStore, +{ + let slot = state.slot(); + let epoch = state.current_epoch(); + + // Check previous epoch target balances + let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?; + if previous_target_balance != cached_previous_target_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Previous epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, previous_target_balance, cached_previous_target_balance) + )); + } + + // Check current epoch target balances + let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?; + if current_target_balance != cached_current_target_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Current epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, current_target_balance, cached_current_target_balance) + )); + } + + // Check current epoch total balances + let total_active_balance = participation_cache.current_epoch_total_active_balance(); + if total_active_balance != cached_total_active_balance { + return Err(Error::ProgressiveBalancesCacheCheckFailed( + format!("Current epoch total active balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, total_active_balance, cached_total_active_balance) + )); + } + + Ok(()) +} + /// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes. /// /// This is used when persisting the state of the fork choice to disk. diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ef262b58c00..d28210aa1bd 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -17,12 +17,13 @@ use fork_choice::{ use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, - Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, + Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, ProgressiveBalancesMode, + RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; -pub const VALIDATOR_COUNT: usize = 32; +pub const VALIDATOR_COUNT: usize = 64; /// Defines some delay between when an attestation is created and when it is mutated. pub enum MutationDelay { @@ -68,6 +69,24 @@ impl ForkChoiceTest { Self { harness } } + /// Creates a new tester with the specified `ProgressiveBalancesMode` and genesis from latest fork. + fn new_with_progressive_balances_mode(mode: ProgressiveBalancesMode) -> ForkChoiceTest { + // genesis with latest fork (at least altair required to test the cache) + let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec) + .chain_config(ChainConfig { + progressive_balances_mode: mode, + ..ChainConfig::default() + }) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + Self { harness } + } + /// Get a value from the `ForkChoice` instantiation. 
fn get(&self, func: T) -> U where @@ -212,6 +231,39 @@ impl ForkChoiceTest { self } + /// Slash a validator from the previous epoch committee. + pub async fn add_previous_epoch_attester_slashing(self) -> Self { + let state = self.harness.get_current_state(); + let previous_epoch_shuffling = state.get_shuffling(RelativeEpoch::Previous).unwrap(); + let validator_indices = previous_epoch_shuffling + .iter() + .map(|idx| *idx as u64) + .take(1) + .collect(); + + self.harness + .add_attester_slashing(validator_indices) + .unwrap(); + + self + } + + /// Slash the proposer of a block in the previous epoch. + pub async fn add_previous_epoch_proposer_slashing(self, slots_per_epoch: u64) -> Self { + let previous_epoch_slot = self.harness.get_current_slot() - slots_per_epoch; + let previous_epoch_block = self + .harness + .chain + .block_at_slot(previous_epoch_slot, WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let proposer_index: u64 = previous_epoch_block.message().proposer_index(); + + self.harness.add_proposer_slashing(proposer_index).unwrap(); + + self + } + /// Apply `count` blocks to the chain (without attestations). pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); @@ -286,7 +338,9 @@ impl ForkChoiceTest { Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, + self.harness.chain.config.progressive_balances_mode, &self.harness.chain.spec, + self.harness.logger(), ) .unwrap(); self @@ -328,7 +382,9 @@ impl ForkChoiceTest { Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, + self.harness.chain.config.progressive_balances_mode, &self.harness.chain.spec, + self.harness.logger(), ) .err() .expect("on_block did not return an error"); @@ -1287,3 +1343,49 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } + +/// Checks that `ProgressiveBalancesCache` is updated correctly after an attester slashing event, +/// where the slashed validator is a target attester in previous / current epoch. +#[tokio::test] +async fn progressive_balances_cache_attester_slashing() { + ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict) + // first two epochs + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .add_previous_epoch_attester_slashing() + .await + // expect fork choice to import blocks successfully after a previous epoch attester is + // slashed, i.e. the slashed attester's balance is correctly excluded from + // the previous epoch total balance in `ProgressiveBalancesCache`. + .apply_blocks(1) + .await + // expect fork choice to import another epoch of blocks successfully - the slashed + // attester's balance should be excluded from the current epoch total balance in + // `ProgressiveBalancesCache` as well. + .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .await; +} + +/// Checks that `ProgressiveBalancesCache` is updated correctly after a proposer slashing event, +/// where the slashed validator is a target attester in previous / current epoch. 
+#[tokio::test] +async fn progressive_balances_cache_proposer_slashing() { + ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict) + // first two epochs + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) + .await + // expect fork choice to import blocks successfully after a previous epoch proposer is + // slashed, i.e. the slashed proposer's balance is correctly excluded from + // the previous epoch total balance in `ProgressiveBalancesCache`. + .apply_blocks(1) + .await + // expect fork choice to import another epoch of blocks successfully - the slashed + // proposer's balance should be excluded from the current epoch total balance in + // `ProgressiveBalancesCache` as well. + .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .await; +} diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 8a2e2439bb6..ffe8be3a041 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -7,6 +7,7 @@ mod slash_validator; pub mod altair; pub mod base; +pub mod update_progressive_balances_cache; pub use deposit_data_tree::DepositDataTree; pub use get_attestation_participation::get_attestation_participation_flag_indices; diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index d4675f5ef5d..d54da43a04b 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,3 +1,4 @@ +use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing; use crate::{ common::{decrease_balance, increase_balance, initiate_validator_exit}, per_block_processing::errors::BlockProcessingError, @@ -43,6 +44,8 @@ pub fn slash_validator( .safe_div(spec.min_slashing_penalty_quotient_for_state(state))?, )?; + update_progressive_balances_on_slashing(state, slashed_index)?; + // Apply proposer and whistleblower rewards let proposer_index = ctxt.get_proposer_index(state, spec)? as usize; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs new file mode 100644 index 00000000000..45b5d657a6e --- /dev/null +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -0,0 +1,142 @@ +/// A collection of all functions that mutates the `ProgressiveBalancesCache`. +use crate::metrics::{ + PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, +}; +use crate::per_epoch_processing::altair::ParticipationCache; +use crate::{BlockProcessingError, EpochProcessingError}; +use lighthouse_metrics::set_gauge; +use ssz_types::VariableList; +use std::borrow::Cow; +use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; +use types::{ + is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, + ParticipationFlags, ProgressiveBalancesCache, +}; + +/// Initializes the `ProgressiveBalancesCache` cache using balance values from the +/// `ParticipationCache`. If the optional `&ParticipationCache` is not supplied, it will be computed +/// from the `BeaconState`. 
+pub fn initialize_progressive_balances_cache( + state: &mut BeaconState, + maybe_participation_cache: Option<&ParticipationCache>, + spec: &ChainSpec, +) -> Result<(), BeaconStateError> { + if !is_progressive_balances_enabled(state) + || state.progressive_balances_cache().is_initialized() + { + return Ok(()); + } + + let participation_cache = match maybe_participation_cache { + Some(cache) => Cow::Borrowed(cache), + None => Cow::Owned(ParticipationCache::new(state, spec)?), + }; + + let previous_epoch_target_attesting_balance = participation_cache + .previous_epoch_target_attesting_balance_raw() + .map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?; + + let current_epoch_target_attesting_balance = participation_cache + .current_epoch_target_attesting_balance_raw() + .map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?; + + let current_epoch = state.current_epoch(); + state.progressive_balances_cache_mut().initialize( + current_epoch, + previous_epoch_target_attesting_balance, + current_epoch_target_attesting_balance, + ); + + update_progressive_balances_metrics(state.progressive_balances_cache())?; + + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` when a new target attestation has been processed. +pub fn update_progressive_balances_on_attestation( + state: &mut BeaconState, + epoch: Epoch, + validator_index: usize, +) -> Result<(), BlockProcessingError> { + if is_progressive_balances_enabled(state) { + let validator = state.get_validator(validator_index)?; + if !validator.slashed { + let validator_effective_balance = validator.effective_balance; + state + .progressive_balances_cache_mut() + .on_new_target_attestation(epoch, validator_effective_balance)?; + } + } + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` when a target attester has been slashed. +pub fn update_progressive_balances_on_slashing( + state: &mut BeaconState, + validator_index: usize, +) -> Result<(), BlockProcessingError> { + if is_progressive_balances_enabled(state) { + let previous_epoch_participation = state.previous_epoch_participation()?; + let is_previous_epoch_target_attester = + is_target_attester_in_epoch::(previous_epoch_participation, validator_index)?; + + let current_epoch_participation = state.current_epoch_participation()?; + let is_current_epoch_target_attester = + is_target_attester_in_epoch::(current_epoch_participation, validator_index)?; + + let validator_effective_balance = state.get_effective_balance(validator_index)?; + + state.progressive_balances_cache_mut().on_slashing( + is_previous_epoch_target_attester, + is_current_epoch_target_attester, + validator_effective_balance, + )?; + } + + Ok(()) +} + +/// Updates the `ProgressiveBalancesCache` on epoch transition. +pub fn update_progressive_balances_on_epoch_transition( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), EpochProcessingError> { + if is_progressive_balances_enabled(state) { + state + .progressive_balances_cache_mut() + .on_epoch_transition(spec)?; + + update_progressive_balances_metrics(state.progressive_balances_cache())?; + } + + Ok(()) +} + +pub fn update_progressive_balances_metrics( + cache: &ProgressiveBalancesCache, +) -> Result<(), BeaconStateError> { + set_gauge( + &PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + cache.previous_epoch_target_attesting_balance()? as i64, + ); + + set_gauge( + &PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, + cache.current_epoch_target_attesting_balance()? 
diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs
index 68f04b554e3..ebbc8f9f31e 100644
--- a/consensus/state_processing/src/genesis.rs
+++ b/consensus/state_processing/src/genesis.rs
@@ -92,7 +92,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
     }
 
     // Now that we have our validators, initialize the caches (including the committees)
-    state.build_all_caches(spec)?;
+    state.build_caches(spec)?;
 
     // Set genesis validators root for domain separation and chain versioning
     *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?;
@@ -115,7 +115,7 @@ pub fn process_activations<T: EthSpec>(
     state: &mut BeaconState<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    let (validators, balances) = state.validators_and_balances_mut();
+    let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
     for (index, validator) in validators.iter_mut().enumerate() {
         let balance = balances
             .get(index)
diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs
index ddfaae56403..360b007678a 100644
--- a/consensus/state_processing/src/metrics.rs
+++ b/consensus/state_processing/src/metrics.rs
@@ -23,4 +23,15 @@ lazy_static! {
         "beacon_participation_prev_epoch_active_gwei_total",
         "Total effective balance (gwei) of validators active in the previous epoch"
     );
+    /*
+     * Participation Metrics (progressive balances)
+     */
+    pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
+        "beacon_participation_prev_epoch_target_attesting_gwei_progressive_total",
+        "Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch"
+    );
+    pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
+        "beacon_participation_curr_epoch_target_attesting_gwei_progressive_total",
+        "Progressive total effective balance (gwei) of validators who attested to the target in the current epoch"
+    );
 }
diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs
index 124fdf6500b..b8b76a499d3 100644
--- a/consensus/state_processing/src/per_block_processing.rs
+++ b/consensus/state_processing/src/per_block_processing.rs
@@ -41,6 +41,9 @@ mod verify_proposer_slashing;
 
 use crate::common::decrease_balance;
 use crate::StateProcessingStrategy;
+use crate::common::update_progressive_balances_cache::{
+    initialize_progressive_balances_cache, update_progressive_balances_metrics,
+};
 
 #[cfg(feature = "arbitrary-fuzz")]
 use arbitrary::Arbitrary;
@@ -114,6 +117,8 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
         .fork_name(spec)
         .map_err(BlockProcessingError::InconsistentStateFork)?;
 
+    initialize_progressive_balances_cache(state, None, spec)?;
+
     let verify_signatures = match block_signature_strategy {
         BlockSignatureStrategy::VerifyBulk => {
             // Verify all signatures in the block at once.
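Calling `initialize_progressive_balances_cache` at the top of `per_block_processing` is cheap after the first block because of the early-return guard in the new module above. The pattern, reduced to a sketch with a hypothetical `Cache` type:

```rust
// Hypothetical `Cache` standing in for `ProgressiveBalancesCache`: the guard makes
// per-block initialization idempotent, so every block after the first is a no-op.
#[derive(Default)]
struct Cache {
    inner: Option<u64>,
}

impl Cache {
    fn is_initialized(&self) -> bool {
        self.inner.is_some()
    }

    fn initialize_if_needed(&mut self, enabled: bool, compute: impl FnOnce() -> u64) {
        if !enabled || self.is_initialized() {
            return;
        }
        self.inner = Some(compute());
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.initialize_if_needed(true, || 7);
    cache.initialize_if_needed(true, || 99); // ignored: already initialized
    assert_eq!(cache.inner, Some(7));
}
```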
@@ -182,6 +187,10 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
         )?;
     }
 
+    if is_progressive_balances_enabled(state) {
+        update_progressive_balances_metrics(state.progressive_balances_cache())?;
+    }
+
     Ok(())
 }
diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs
index 1aaf298d690..0aba1d83faf 100644
--- a/consensus/state_processing/src/per_block_processing/errors.rs
+++ b/consensus/state_processing/src/per_block_processing/errors.rs
@@ -1,6 +1,8 @@
 use super::signature_sets::Error as SignatureSetError;
+use crate::per_epoch_processing::altair::participation_cache;
 use crate::ContextError;
 use merkle_proof::MerkleTreeError;
+use participation_cache::Error as ParticipationCacheError;
 use safe_arith::ArithError;
 use ssz::DecodeError;
 use types::*;
@@ -83,6 +85,7 @@ pub enum BlockProcessingError {
         found: Hash256,
     },
     WithdrawalCredentialsInvalid,
+    ParticipationCacheError(ParticipationCacheError),
 }
 
 impl From<BeaconStateError> for BlockProcessingError {
@@ -140,6 +143,12 @@
     }
 }
 
+impl From<ParticipationCacheError> for BlockProcessingError {
+    fn from(e: ParticipationCacheError) -> Self {
+        BlockProcessingError::ParticipationCacheError(e)
+    }
+}
+
 /// A conversion that consumes `self` and adds an `index` variable to resulting struct.
 ///
 /// Used here to allow converting an error into an upstream error that points to the object that
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs
index 4bee596615a..1dbcb7fb8fe 100644
--- a/consensus/state_processing/src/per_block_processing/process_operations.rs
+++ b/consensus/state_processing/src/per_block_processing/process_operations.rs
@@ -97,6 +97,8 @@ pub mod base {
 
 pub mod altair {
     use super::*;
+    use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation;
+    use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
 
     pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>(
         state: &mut BeaconState<T>,
@@ -163,6 +165,14 @@ pub mod altair {
                     get_base_reward(state, index, base_reward_per_increment, spec)?
                         .safe_mul(weight)?,
                 )?;
+
+                if flag_index == TIMELY_TARGET_FLAG_INDEX {
+                    update_progressive_balances_on_attestation(
+                        state,
+                        data.target.epoch,
+                        index,
+                    )?;
+                }
             }
         }
     }
@@ -235,6 +245,7 @@
     Ok(())
 }
 
+
 /// Wrapper function to handle calling the correct version of `process_attestations` based on
 /// the fork.
 pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>(
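The attestation hook above fires once per newly set participation flag, but only the timely-target flag feeds the cache, and only for unslashed validators. A simplified sketch of that filter (hypothetical flag constant and totals, not the real types):

```rust
// Hypothetical constants and totals; the real code walks all participation flags.
const TIMELY_TARGET_FLAG: usize = 1;

fn on_flags_newly_set(
    newly_set_flags: &[usize],
    validator_slashed: bool,
    effective_balance: u64,
    target_attesting_total: &mut u64,
) {
    for &flag_index in newly_set_flags {
        // Source and head flags earn rewards but do not touch the cache.
        if flag_index == TIMELY_TARGET_FLAG && !validator_slashed {
            *target_attesting_total += effective_balance;
        }
    }
}

fn main() {
    let mut total = 0u64;
    on_flags_newly_set(&[0, 1, 2], false, 32_000_000_000, &mut total);
    assert_eq!(total, 32_000_000_000); // only the target flag contributed
}
```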
diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs
index d5df2fc975d..0abbd16a989 100644
--- a/consensus/state_processing/src/per_epoch_processing/altair.rs
+++ b/consensus/state_processing/src/per_epoch_processing/altair.rs
@@ -1,4 +1,7 @@
 use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error};
+use crate::common::update_progressive_balances_cache::{
+    initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
+};
 use crate::per_epoch_processing::{
     effective_balance_updates::process_effective_balance_updates,
     historical_roots_update::process_historical_roots_update,
@@ -31,6 +34,7 @@ pub fn process_epoch<T: EthSpec>(
     // Pre-compute participating indices and total balances.
     let participation_cache = ParticipationCache::new(state, spec)?;
     let sync_committee = state.current_sync_committee()?.clone();
+    initialize_progressive_balances_cache::<T>(state, Some(&participation_cache), spec)?;
 
     // Justification and finalization.
     let justification_and_finalization_state =
@@ -56,7 +60,7 @@ pub fn process_epoch<T: EthSpec>(
     process_eth1_data_reset(state)?;
 
     // Update effective balances with hysteresis (lag).
-    process_effective_balance_updates(state, spec)?;
+    process_effective_balance_updates(state, Some(&participation_cache), spec)?;
 
     // Reset slashings
     process_slashings_reset(state)?;
@@ -75,6 +79,8 @@ pub fn process_epoch<T: EthSpec>(
     // Rotate the epoch caches to suit the epoch transition.
     state.advance_caches(spec)?;
 
+    update_progressive_balances_on_epoch_transition(state, spec)?;
+
     Ok(EpochProcessingSummary::Altair {
         participation_cache,
         sync_committee,
diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs
index 004726923e9..a5caddd0455 100644
--- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs
+++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs
@@ -11,49 +11,23 @@
 //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used
 //! to get useful summaries about the validator participation in an epoch.
 
-use safe_arith::{ArithError, SafeArith};
 use types::{
     consts::altair::{
         NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX,
         TIMELY_TARGET_FLAG_INDEX,
     },
-    BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch,
+    Balance, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags,
+    RelativeEpoch,
 };
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone)]
 pub enum Error {
     InvalidFlagIndex(usize),
     InvalidValidatorIndex(usize),
 }
 
-/// A balance which will never be below the specified `minimum`.
-///
-/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
-#[derive(PartialEq, Debug, Clone, Copy)]
-struct Balance {
-    raw: u64,
-    minimum: u64,
-}
-
-impl Balance {
-    /// Initialize the balance to `0`, or the given `minimum`.
-    pub fn zero(minimum: u64) -> Self {
-        Self { raw: 0, minimum }
-    }
-
-    /// Returns the balance with respect to the initialization `minimum`.
-    pub fn get(&self) -> u64 {
-        std::cmp::max(self.raw, self.minimum)
-    }
-
-    /// Add-assign to the balance.
-    pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
-        self.raw.safe_add_assign(other)
-    }
-}
-
 /// Caches the participation values for one epoch (either the previous or current).
-#[derive(PartialEq, Debug)]
+#[derive(PartialEq, Debug, Clone)]
 struct SingleEpochParticipationCache {
     /// Maps an active validator index to their participation flags.
     ///
@@ -95,6 +69,14 @@ impl SingleEpochParticipationCache {
             .ok_or(Error::InvalidFlagIndex(flag_index))
     }
 
+    /// Returns the raw total balance of attesters who have `flag_index` set.
+    fn total_flag_balance_raw(&self, flag_index: usize) -> Result<Balance, Error> {
+        self.total_flag_balances
+            .get(flag_index)
+            .copied()
+            .ok_or(Error::InvalidFlagIndex(flag_index))
+    }
+
     /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set.
     ///
     /// ## Errors
@@ -173,7 +155,7 @@
 }
 
 /// Maintains a cache to be used during `altair::process_epoch`.
-#[derive(PartialEq, Debug)]
+#[derive(PartialEq, Debug, Clone)]
 pub struct ParticipationCache {
     current_epoch: Epoch,
     /// Caches information about active validators pertaining to `self.current_epoch`.
@@ -291,6 +273,11 @@ impl ParticipationCache {
             .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
     }
 
+    pub fn current_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
+        self.current_epoch_participation
+            .total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
+    }
+
     pub fn previous_epoch_total_active_balance(&self) -> u64 {
         self.previous_epoch_participation.total_active_balance.get()
     }
@@ -300,6 +287,11 @@ impl ParticipationCache {
             .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
     }
 
+    pub fn previous_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
+        self.previous_epoch_participation
+            .total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
+    }
+
     pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, Error> {
         self.previous_epoch_participation
             .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX)
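The existing `total_flag_balance` getters return the clamped `u64` from `Balance::get`, which never drops below `EFFECTIVE_BALANCE_INCREMENT`. The new `_raw` accessors instead hand back the `Balance` itself, so the progressive cache can seed and keep accumulating on the raw value. A sketch of the distinction, reusing the `Balance` semantics from this patch:

```rust
// A copy of the `Balance` semantics defined in this patch, to show the difference.
#[derive(Clone, Copy)]
struct Balance {
    raw: u64,
    minimum: u64,
}

impl Balance {
    fn get(&self) -> u64 {
        std::cmp::max(self.raw, self.minimum)
    }
}

fn main() {
    // With no target attesters yet, the clamped read reports the minimum, not zero.
    let empty = Balance { raw: 0, minimum: 1_000_000_000 };
    assert_eq!(empty.get(), 1_000_000_000);
    // A running total that later has real balances added to it must start from the
    // raw value, otherwise the minimum would be baked into the sum.
    assert_eq!(empty.raw, 0);
}
```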
diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs
index cb7e7d4b300..680563ce74c 100644
--- a/consensus/state_processing/src/per_epoch_processing/base.rs
+++ b/consensus/state_processing/src/per_epoch_processing/base.rs
@@ -52,7 +52,7 @@ pub fn process_epoch<T: EthSpec>(
     process_eth1_data_reset(state)?;
 
     // Update effective balances with hysteresis (lag).
-    process_effective_balance_updates(state, spec)?;
+    process_effective_balance_updates(state, None, spec)?;
 
     // Reset slashings
     process_slashings_reset(state)?;
diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs
index aaf301f29ec..911510ed0ce 100644
--- a/consensus/state_processing/src/per_epoch_processing/capella.rs
+++ b/consensus/state_processing/src/per_epoch_processing/capella.rs
@@ -11,6 +11,9 @@ use crate::per_epoch_processing::{
 };
 use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch};
 
+use crate::common::update_progressive_balances_cache::{
+    initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
+};
 pub use historical_summaries_update::process_historical_summaries_update;
 
 mod historical_summaries_update;
@@ -27,6 +30,7 @@ pub fn process_epoch<T: EthSpec>(
     // Pre-compute participating indices and total balances.
     let participation_cache = ParticipationCache::new(state, spec)?;
     let sync_committee = state.current_sync_committee()?.clone();
+    initialize_progressive_balances_cache(state, Some(&participation_cache), spec)?;
 
     // Justification and finalization.
     let justification_and_finalization_state =
@@ -52,7 +56,7 @@ pub fn process_epoch<T: EthSpec>(
     process_eth1_data_reset(state)?;
 
     // Update effective balances with hysteresis (lag).
-    process_effective_balance_updates(state, spec)?;
+    process_effective_balance_updates(state, Some(&participation_cache), spec)?;
 
     // Reset slashings
     process_slashings_reset(state)?;
@@ -71,6 +75,8 @@ pub fn process_epoch<T: EthSpec>(
     // Rotate the epoch caches to suit the epoch transition.
     state.advance_caches(spec)?;
 
+    update_progressive_balances_on_epoch_transition(state, spec)?;
+
     Ok(EpochProcessingSummary::Altair {
         participation_cache,
         sync_committee,
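The `process_effective_balance_updates` changes in the next hunk thread a participation cache through the existing hysteresis logic. For orientation, the thresholds work out as follows on mainnet parameters (a worked example; the constants come from the consensus spec, not this diff):

```rust
fn main() {
    const GWEI_PER_ETH: u64 = 1_000_000_000;
    // Mainnet constants: EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4,
    // HYSTERESIS_DOWNWARD_MULTIPLIER = 1, HYSTERESIS_UPWARD_MULTIPLIER = 5.
    let hysteresis_increment = GWEI_PER_ETH / 4; // 0.25 ETH
    let downward_threshold = hysteresis_increment; // x1 = 0.25 ETH
    let upward_threshold = hysteresis_increment * 5; // 1.25 ETH

    // Downward: an effective balance of 32 ETH only falls once balance + 0.25 < 32.
    let effective_balance = 32 * GWEI_PER_ETH;
    assert!(!(31_800_000_000 + downward_threshold < effective_balance)); // 31.8 ETH: no update
    assert!(31_700_000_000 + downward_threshold < effective_balance); // 31.7 ETH: update

    // Upward: an effective balance of 31 ETH only rises once 31 + 1.25 < balance.
    let effective_balance = 31 * GWEI_PER_ETH;
    assert!(effective_balance + upward_threshold < 32_300_000_000); // 32.3 ETH: update
}
```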
diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs
index c166667b5a9..1759f7e1402 100644
--- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs
+++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs
@@ -1,11 +1,13 @@
 use super::errors::EpochProcessingError;
+use crate::per_epoch_processing::altair::ParticipationCache;
 use safe_arith::SafeArith;
 use types::beacon_state::BeaconState;
 use types::chain_spec::ChainSpec;
-use types::{BeaconStateError, EthSpec};
+use types::{BeaconStateError, EthSpec, ProgressiveBalancesCache};
 
 pub fn process_effective_balance_updates<T: EthSpec>(
     state: &mut BeaconState<T>,
+    maybe_participation_cache: Option<&ParticipationCache>,
     spec: &ChainSpec,
 ) -> Result<(), EpochProcessingError> {
     let hysteresis_increment = spec
@@ -13,7 +15,8 @@ pub fn process_effective_balance_updates<T: EthSpec>(
         .safe_div(spec.hysteresis_quotient)?;
     let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?;
     let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?;
-    let (validators, balances) = state.validators_and_balances_mut();
+    let (validators, balances, progressive_balances_cache) =
+        state.validators_and_balances_and_progressive_balances_mut();
     for (index, validator) in validators.iter_mut().enumerate() {
         let balance = balances
             .get(index)
@@ -23,11 +26,43 @@ pub fn process_effective_balance_updates<T: EthSpec>(
         if balance.safe_add(downward_threshold)? < validator.effective_balance
             || validator.effective_balance.safe_add(upward_threshold)? < balance
         {
-            validator.effective_balance = std::cmp::min(
+            let old_effective_balance = validator.effective_balance;
+            let new_effective_balance = std::cmp::min(
                 balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?,
                 spec.max_effective_balance,
             );
+
+            if let Some(participation_cache) = maybe_participation_cache {
+                update_progressive_balances(
+                    participation_cache,
+                    progressive_balances_cache,
+                    index,
+                    old_effective_balance,
+                    new_effective_balance,
+                )?;
+            }
+
+            validator.effective_balance = new_effective_balance;
         }
     }
     Ok(())
 }
+
+fn update_progressive_balances(
+    participation_cache: &ParticipationCache,
+    progressive_balances_cache: &mut ProgressiveBalancesCache,
+    index: usize,
+    old_effective_balance: u64,
+    new_effective_balance: u64,
+) -> Result<(), EpochProcessingError> {
+    if old_effective_balance != new_effective_balance {
+        let is_current_epoch_target_attester =
+            participation_cache.is_current_epoch_timely_target_attester(index)?;
+        progressive_balances_cache.on_effective_balance_change(
+            is_current_epoch_target_attester,
+            old_effective_balance,
+            new_effective_balance,
+        )?;
+    }
+    Ok(())
+}
diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs
index 6d5342cd363..2d595491c11 100644
--- a/consensus/state_processing/src/per_epoch_processing/slashings.rs
+++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs
@@ -16,7 +16,7 @@ pub fn process_slashings<T: EthSpec>(
         total_balance,
     );
 
-    let (validators, balances) = state.validators_and_balances_mut();
+    let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
     for (index, validator) in validators.iter().enumerate() {
         if validator.slashed
             && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?
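Note that `update_progressive_balances` above adjusts only the current-epoch total, and only by the delta between the old and new effective balance. The previous-epoch total is left alone, presumably because justification has already consumed it earlier in `process_epoch` (that rationale is my reading of the ordering, not something this diff states). A reduced sketch of the delta logic with simplified types:

```rust
// Simplified delta application: direction-aware, current epoch only.
fn on_effective_balance_change(
    current_epoch_total: &mut u64,
    is_current_epoch_target_attester: bool,
    old_effective_balance: u64,
    new_effective_balance: u64,
) {
    if !is_current_epoch_target_attester || old_effective_balance == new_effective_balance {
        return;
    }
    if new_effective_balance > old_effective_balance {
        *current_epoch_total += new_effective_balance - old_effective_balance;
    } else {
        *current_epoch_total -= old_effective_balance - new_effective_balance;
    }
}

fn main() {
    let mut total = 64_000_000_000u64;
    // A target attester's effective balance drops from 32 ETH to 31 ETH.
    on_effective_balance_change(&mut total, true, 32_000_000_000, 31_000_000_000);
    assert_eq!(total, 63_000_000_000);
}
```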
diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs
index 176f1af15c6..26b1192bc16 100644
--- a/consensus/state_processing/src/upgrade/altair.rs
+++ b/consensus/state_processing/src/upgrade/altair.rs
@@ -1,3 +1,4 @@
+use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
 use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices};
 use std::mem;
 use std::sync::Arc;
@@ -101,6 +102,7 @@ pub fn upgrade_to_altair<E: EthSpec>(
         next_sync_committee: temp_sync_committee, // not read
         // Caches
         total_active_balance: pre.total_active_balance,
+        progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
         committee_caches: mem::take(&mut pre.committee_caches),
         pubkey_cache: mem::take(&mut pre.pubkey_cache),
         exit_cache: mem::take(&mut pre.exit_cache),
@@ -110,6 +112,8 @@ pub fn upgrade_to_altair<E: EthSpec>(
     // Fill in previous epoch participation from the pre state's pending attestations.
     translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?;
 
+    initialize_progressive_balances_cache(&mut post, None, spec)?;
+
     // Fill in sync committees
     // Note: A duplicate committee is assigned for the current and next committee at the fork
     // boundary
diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs
index 3b933fac37a..5153e35f447 100644
--- a/consensus/state_processing/src/upgrade/capella.rs
+++ b/consensus/state_processing/src/upgrade/capella.rs
@@ -62,6 +62,7 @@ pub fn upgrade_to_capella<E: EthSpec>(
         historical_summaries: VariableList::default(),
         // Caches
         total_active_balance: pre.total_active_balance,
+        progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
         committee_caches: mem::take(&mut pre.committee_caches),
         pubkey_cache: mem::take(&mut pre.pubkey_cache),
         exit_cache: mem::take(&mut pre.exit_cache),
diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs
index c172466248a..eb744501072 100644
--- a/consensus/state_processing/src/upgrade/merge.rs
+++ b/consensus/state_processing/src/upgrade/merge.rs
@@ -60,6 +60,7 @@ pub fn upgrade_to_bellatrix<E: EthSpec>(
         latest_execution_payload_header: <ExecutionPayloadHeaderMerge<E>>::default(),
         // Caches
         total_active_balance: pre.total_active_balance,
+        progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
         committee_caches: mem::take(&mut pre.committee_caches),
         pubkey_cache: mem::take(&mut pre.pubkey_cache),
         exit_cache: mem::take(&mut pre.exit_cache),
diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml
index 583b940d5f4..ba15f6d4885 100644
--- a/consensus/types/Cargo.toml
+++ b/consensus/types/Cargo.toml
@@ -52,6 +52,7 @@ serde_json = "1.0.74"
 smallvec = "1.8.0"
 serde_with = "1.13.0"
 maplit = "1.0.2"
+strum = { version = "0.24.0", features = ["derive"] }
 
 [dev-dependencies]
 criterion = "0.3.3"
diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs
index 28f57e70804..bb2b527109f 100644
--- a/consensus/types/benches/benches.rs
+++ b/consensus/types/benches/benches.rs
@@ -51,7 +51,7 @@ fn all_benches(c: &mut Criterion) {
     let spec = Arc::new(MainnetEthSpec::default_spec());
 
     let mut state = get_state::<MainnetEthSpec>(validator_count);
-    state.build_all_caches(&spec).expect("should build caches");
+    state.build_caches(&spec).expect("should build caches");
     let state_bytes = state.as_ssz_bytes();
 
     let inner_state = state.clone();
diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index 4a9da364047..1fa4dee3a0e 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -26,6 +26,8 @@ pub use self::committee_cache::{
     compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
     CommitteeCache,
 };
+pub use crate::beacon_state::balance::Balance;
+pub use crate::beacon_state::progressive_balances_cache::*;
 use crate::historical_summary::HistoricalSummary;
 pub use clone_config::CloneConfig;
 pub use eth_spec::*;
@@ -34,9 +36,11 @@ pub use tree_hash_cache::BeaconTreeHashCache;
 
 #[macro_use]
 mod committee_cache;
+mod balance;
 mod clone_config;
 mod exit_cache;
 mod iter;
+mod progressive_balances_cache;
 mod pubkey_cache;
 mod tests;
 mod tree_hash_cache;
@@ -101,6 +105,9 @@ pub enum Error {
     SszTypesError(ssz_types::Error),
     TreeHashCacheNotInitialized,
     NonLinearTreeHashCacheHistory,
+    ParticipationCacheError(String),
+    ProgressiveBalancesCacheNotInitialized,
+    ProgressiveBalancesCacheInconsistent,
     TreeHashCacheSkippedSlot {
         cache: Slot,
         state: Slot,
@@ -317,6 +324,12 @@ where
     #[tree_hash(skip_hashing)]
    #[test_random(default)]
     #[derivative(Clone(clone_with = "clone_default"))]
+    pub progressive_balances_cache: ProgressiveBalancesCache,
+    #[serde(skip_serializing, skip_deserializing)]
+    #[ssz(skip_serializing, skip_deserializing)]
+    #[tree_hash(skip_hashing)]
+    #[test_random(default)]
+    #[derivative(Clone(clone_with = "clone_default"))]
     pub committee_caches: [CommitteeCache; CACHED_EPOCHS],
     #[serde(skip_serializing, skip_deserializing)]
     #[ssz(skip_serializing, skip_deserializing)]
@@ -393,6 +406,7 @@ impl<T: EthSpec> BeaconState<T> {
 
             // Caching (not in spec)
             total_active_balance: None,
+            progressive_balances_cache: <_>::default(),
            committee_caches: [
                 CommitteeCache::default(),
                 CommitteeCache::default(),
@@ -757,7 +771,7 @@ impl<T: EthSpec> BeaconState<T> {
         Ok(signature_hash_int.safe_rem(modulo)? == 0)
     }
 
-    /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`.
+    /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`.
     ///
     /// Spec v0.12.1
     pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> {
@@ -1150,12 +1164,30 @@ impl<T: EthSpec> BeaconState<T> {
     }
 
     /// Convenience accessor for validators and balances simultaneously.
-    pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) {
+    pub fn validators_and_balances_and_progressive_balances_mut(
+        &mut self,
+    ) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) {
         match self {
-            BeaconState::Base(state) => (&mut state.validators, &mut state.balances),
-            BeaconState::Altair(state) => (&mut state.validators, &mut state.balances),
-            BeaconState::Merge(state) => (&mut state.validators, &mut state.balances),
-            BeaconState::Capella(state) => (&mut state.validators, &mut state.balances),
+            BeaconState::Base(state) => (
+                &mut state.validators,
+                &mut state.balances,
+                &mut state.progressive_balances_cache,
+            ),
+            BeaconState::Altair(state) => (
+                &mut state.validators,
+                &mut state.balances,
+                &mut state.progressive_balances_cache,
+            ),
+            BeaconState::Merge(state) => (
+                &mut state.validators,
+                &mut state.balances,
+                &mut state.progressive_balances_cache,
+            ),
+            BeaconState::Capella(state) => (
+                &mut state.validators,
+                &mut state.balances,
+                &mut state.progressive_balances_cache,
+            ),
         }
     }
 
@@ -1380,7 +1412,7 @@ impl<T: EthSpec> BeaconState<T> {
     }
 
     /// Build all caches (except the tree hash cache), if they need to be built.
-    pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
+    pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
         self.build_all_committee_caches(spec)?;
         self.update_pubkey_cache()?;
         self.build_exit_cache(spec)?;
@@ -1412,6 +1444,7 @@ impl<T: EthSpec> BeaconState<T> {
         self.drop_committee_cache(RelativeEpoch::Next)?;
         self.drop_pubkey_cache();
         self.drop_tree_hash_cache();
+        self.drop_progressive_balances_cache();
         *self.exit_cache_mut() = ExitCache::default();
         Ok(())
     }
@@ -1608,6 +1641,11 @@ impl<T: EthSpec> BeaconState<T> {
         *self.pubkey_cache_mut() = PubkeyCache::default()
     }
 
+    /// Completely drops the `progressive_balances_cache`, replacing it with a new, empty cache.
+    fn drop_progressive_balances_cache(&mut self) {
+        *self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default();
+    }
+
     /// Initialize but don't fill the tree hash cache, if it isn't already initialized.
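The widened accessor above is not just a convenience: returning all three mutable borrows from one call lets the borrow checker see them as disjoint, whereas three separate `&mut self` getter calls could never be live at once. A toy illustration of the borrow-splitting idea (unrelated types, same shape):

```rust
// One method can return disjoint `&mut` borrows of several fields simultaneously.
struct State {
    validators: Vec<u64>,
    balances: Vec<u64>,
    cache: u64,
}

impl State {
    fn split_mut(&mut self) -> (&mut [u64], &mut [u64], &mut u64) {
        (&mut self.validators, &mut self.balances, &mut self.cache)
    }
}

fn main() {
    let mut state = State { validators: vec![10], balances: vec![20], cache: 0 };
    let (validators, balances, cache) = state.split_mut();
    // All three mutable borrows are usable at the same time.
    validators[0] += 1;
    balances[0] += 1;
    *cache += 1;
    assert_eq!((state.validators[0], state.balances[0], state.cache), (11, 21, 1));
}
```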
     pub fn initialize_tree_hash_cache(&mut self) {
         if !self.tree_hash_cache().is_initialized() {
@@ -1679,6 +1717,9 @@ impl<T: EthSpec> BeaconState<T> {
         if config.tree_hash_cache {
             *res.tree_hash_cache_mut() = self.tree_hash_cache().clone();
         }
+        if config.progressive_balances_cache {
+            *res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone();
+        }
         res
     }
diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/beacon_state/balance.rs
new file mode 100644
index 00000000000..e537a5b9842
--- /dev/null
+++ b/consensus/types/src/beacon_state/balance.rs
@@ -0,0 +1,33 @@
+use arbitrary::Arbitrary;
+use safe_arith::{ArithError, SafeArith};
+
+/// A balance which will never be below the specified `minimum`.
+///
+/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
+#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)]
+pub struct Balance {
+    raw: u64,
+    minimum: u64,
+}
+
+impl Balance {
+    /// Initialize the balance to `0`, or the given `minimum`.
+    pub fn zero(minimum: u64) -> Self {
+        Self { raw: 0, minimum }
+    }
+
+    /// Returns the balance with respect to the initialization `minimum`.
+    pub fn get(&self) -> u64 {
+        std::cmp::max(self.raw, self.minimum)
+    }
+
+    /// Add-assign to the balance.
+    pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
+        self.raw.safe_add_assign(other)
+    }
+
+    /// Sub-assign to the balance.
+    pub fn safe_sub_assign(&mut self, other: u64) -> Result<(), ArithError> {
+        self.raw.safe_sub_assign(other)
+    }
+}
diff --git a/consensus/types/src/beacon_state/clone_config.rs b/consensus/types/src/beacon_state/clone_config.rs
index e5f050aee69..c6e7f47421f 100644
--- a/consensus/types/src/beacon_state/clone_config.rs
+++ b/consensus/types/src/beacon_state/clone_config.rs
@@ -5,6 +5,7 @@ pub struct CloneConfig {
     pub pubkey_cache: bool,
     pub exit_cache: bool,
     pub tree_hash_cache: bool,
+    pub progressive_balances_cache: bool,
 }
 
 impl CloneConfig {
@@ -14,6 +15,7 @@ impl CloneConfig {
             pubkey_cache: true,
             exit_cache: true,
             tree_hash_cache: true,
+            progressive_balances_cache: true,
         }
     }
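The relocated `Balance` type gains `safe_sub_assign` because the cache must now subtract as well as add (slashings, downward effective-balance changes). The same guarantee expressed with std's checked arithmetic, as a sketch:

```rust
// Underflow surfaces as an error and leaves the balance untouched, mirroring the
// `safe_arith` contract used by the real type.
struct Balance {
    raw: u64,
    minimum: u64,
}

impl Balance {
    fn safe_sub_assign(&mut self, other: u64) -> Result<(), &'static str> {
        self.raw = self.raw.checked_sub(other).ok_or("arithmetic underflow")?;
        Ok(())
    }

    fn get(&self) -> u64 {
        std::cmp::max(self.raw, self.minimum)
    }
}

fn main() {
    let mut balance = Balance { raw: 10, minimum: 1 };
    assert!(balance.safe_sub_assign(4).is_ok());
    assert_eq!(balance.get(), 6);
    assert!(balance.safe_sub_assign(100).is_err()); // would underflow
    assert_eq!(balance.get(), 6); // unchanged after the failed subtraction
}
```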
diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs
new file mode 100644
index 00000000000..9f5c223d573
--- /dev/null
+++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs
@@ -0,0 +1,184 @@
+use crate::beacon_state::balance::Balance;
+use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec};
+use arbitrary::Arbitrary;
+use safe_arith::SafeArith;
+use serde_derive::{Deserialize, Serialize};
+use strum::{Display, EnumString, EnumVariantNames};
+
+/// This cache keeps track of the accumulated target attestation balance for the current & previous
+/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification
+/// and finalization instead of converting epoch participation arrays to balances for each block we
+/// process.
+#[derive(Default, Debug, PartialEq, Arbitrary, Clone)]
+pub struct ProgressiveBalancesCache {
+    inner: Option<Inner>,
+}
+
+#[derive(Debug, PartialEq, Arbitrary, Clone)]
+struct Inner {
+    pub current_epoch: Epoch,
+    pub previous_epoch_target_attesting_balance: Balance,
+    pub current_epoch_target_attesting_balance: Balance,
+}
+
+impl ProgressiveBalancesCache {
+    pub fn initialize(
+        &mut self,
+        current_epoch: Epoch,
+        previous_epoch_target_attesting_balance: Balance,
+        current_epoch_target_attesting_balance: Balance,
+    ) {
+        self.inner = Some(Inner {
+            current_epoch,
+            previous_epoch_target_attesting_balance,
+            current_epoch_target_attesting_balance,
+        });
+    }
+
+    pub fn is_initialized(&self) -> bool {
+        self.inner.is_some()
+    }
+
+    /// When a new target attestation has been processed, we add the validator's effective
+    /// balance to the target attesting balance cached for that epoch. If the epoch is neither
+    /// the current epoch nor the previous epoch, an error is returned.
+    pub fn on_new_target_attestation(
+        &mut self,
+        epoch: Epoch,
+        validator_effective_balance: u64,
+    ) -> Result<(), BeaconStateError> {
+        let cache = self.get_inner_mut()?;
+
+        if epoch == cache.current_epoch {
+            cache
+                .current_epoch_target_attesting_balance
+                .safe_add_assign(validator_effective_balance)?;
+        } else if epoch.safe_add(1)? == cache.current_epoch {
+            cache
+                .previous_epoch_target_attesting_balance
+                .safe_add_assign(validator_effective_balance)?;
+        } else {
+            return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent);
+        }
+
+        Ok(())
+    }
+
+    /// When a validator is slashed, we reduce the target attesting balance of each epoch in
+    /// which it attested to the target by the validator's effective balance, to exclude the
+    /// validator's weight.
+    pub fn on_slashing(
+        &mut self,
+        is_previous_epoch_target_attester: bool,
+        is_current_epoch_target_attester: bool,
+        effective_balance: u64,
+    ) -> Result<(), BeaconStateError> {
+        let cache = self.get_inner_mut()?;
+        if is_previous_epoch_target_attester {
+            cache
+                .previous_epoch_target_attesting_balance
+                .safe_sub_assign(effective_balance)?;
+        }
+        if is_current_epoch_target_attester {
+            cache
+                .current_epoch_target_attesting_balance
+                .safe_sub_assign(effective_balance)?;
+        }
+        Ok(())
+    }
+
+    /// When a current epoch target attester has its effective balance changed, we adjust its
+    /// share of the target attesting balance in the cache.
+    pub fn on_effective_balance_change(
+        &mut self,
+        is_current_epoch_target_attester: bool,
+        old_effective_balance: u64,
+        new_effective_balance: u64,
+    ) -> Result<(), BeaconStateError> {
+        let cache = self.get_inner_mut()?;
+        if is_current_epoch_target_attester {
+            if new_effective_balance > old_effective_balance {
+                cache
+                    .current_epoch_target_attesting_balance
+                    .safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?;
+            } else {
+                cache
+                    .current_epoch_target_attesting_balance
+                    .safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?;
+            }
+        }
+        Ok(())
+    }
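The epoch routing in `on_new_target_attestation`, reduced to its essentials: an attestation may credit only the current or the previous epoch's total, and anything else means the cache is out of sync and must be an error. A sketch with plain integers for epochs:

```rust
fn route_target_attestation(
    cache_current_epoch: u64,
    attestation_epoch: u64,
) -> Result<&'static str, &'static str> {
    if attestation_epoch == cache_current_epoch {
        Ok("add to current_epoch_target_attesting_balance")
    } else if attestation_epoch + 1 == cache_current_epoch {
        Ok("add to previous_epoch_target_attesting_balance")
    } else {
        Err("ProgressiveBalancesCacheInconsistent")
    }
}

fn main() {
    assert!(route_target_attestation(10, 10).is_ok()); // current epoch
    assert!(route_target_attestation(10, 9).is_ok()); // previous epoch
    assert!(route_target_attestation(10, 8).is_err()); // too old: cache out of sync
}
```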
+
+    /// On epoch transition, the balance from the current epoch is shifted to the previous
+    /// epoch, and the current epoch balance is reset to 0.
+    pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> {
+        let cache = self.get_inner_mut()?;
+        cache.current_epoch.safe_add_assign(1)?;
+        cache.previous_epoch_target_attesting_balance =
+            cache.current_epoch_target_attesting_balance;
+        cache.current_epoch_target_attesting_balance =
+            Balance::zero(spec.effective_balance_increment);
+        Ok(())
+    }
+
+    pub fn previous_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
+        Ok(self
+            .get_inner()?
+            .previous_epoch_target_attesting_balance
+            .get())
+    }
+
+    pub fn current_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
+        Ok(self
+            .get_inner()?
+            .current_epoch_target_attesting_balance
+            .get())
+    }
+
+    fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> {
+        self.inner
+            .as_mut()
+            .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
+    }
+
+    fn get_inner(&self) -> Result<&Inner, BeaconStateError> {
+        self.inner
+            .as_ref()
+            .ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
+    }
+}
+
+#[derive(
+    Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames,
+)]
+#[strum(serialize_all = "lowercase")]
+pub enum ProgressiveBalancesMode {
+    /// Disable the usage of the progressive cache and use the existing `ParticipationCache`
+    /// calculation.
+    Disabled,
+    /// Enable the usage of the progressive cache, with checks against the `ParticipationCache`,
+    /// falling back to the existing calculation if there is a balance mismatch.
+    Checked,
+    /// Enable the usage of the progressive cache, with checks against the `ParticipationCache`.
+    /// Errors if there is a balance mismatch. Used in testing only.
+    Strict,
+    /// Enable the usage of the progressive cache, with no comparative checks against the
+    /// `ParticipationCache`. This mode is fast but experimental; use with caution.
+    Fast,
+}
+
+impl ProgressiveBalancesMode {
+    pub fn perform_comparative_checks(&self) -> bool {
+        match self {
+            Self::Disabled | Self::Fast => false,
+            Self::Checked | Self::Strict => true,
+        }
+    }
+}
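A sketch of how the strum derives are expected to behave for CLI parsing (assuming the enum above is in scope): `EnumString` generates a `FromStr` impl, and `serialize_all = "lowercase"` means the variants parse from their lowercase names, which matches the `--progressive-balances fast` value exercised by the CLI test later in this series.

```rust
use std::str::FromStr;

fn parse_mode(s: &str) -> Result<ProgressiveBalancesMode, String> {
    ProgressiveBalancesMode::from_str(s).map_err(|e| format!("invalid mode `{s}`: {e}"))
}

fn main() {
    assert_eq!(parse_mode("fast").unwrap(), ProgressiveBalancesMode::Fast);
    assert_eq!(parse_mode("checked").unwrap(), ProgressiveBalancesMode::Checked);
    assert!(parse_mode("turbo").is_err());
}
```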
+
+/// `ProgressiveBalancesCache` is only enabled from `Altair` as it requires `ParticipationCache`.
+pub fn is_progressive_balances_enabled<E: EthSpec>(state: &BeaconState<E>) -> bool {
+    match state {
+        BeaconState::Base(_) => false,
+        BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => true,
+    }
+}
diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs
index d63eaafc4b9..6cd9c1dbf88 100644
--- a/consensus/types/src/beacon_state/tests.rs
+++ b/consensus/types/src/beacon_state/tests.rs
@@ -219,17 +219,18 @@ async fn clone_config() {
     let mut state = build_state::<MinimalEthSpec>(16).await;
 
-    state.build_all_caches(&spec).unwrap();
+    state.build_caches(&spec).unwrap();
     state
         .update_tree_hash_cache()
         .expect("should update tree hash cache");
 
-    let num_caches = 4;
+    let num_caches = 5;
     let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
         committee_caches: (i & 1) != 0,
         pubkey_cache: ((i >> 1) & 1) != 0,
         exit_cache: ((i >> 2) & 1) != 0,
         tree_hash_cache: ((i >> 3) & 1) != 0,
+        progressive_balances_cache: ((i >> 4) & 1) != 0,
     });
 
     for config in all_configs {
diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs
index aa5f52eef8c..01a44cabef7 100644
--- a/lcli/src/new_testnet.rs
+++ b/lcli/src/new_testnet.rs
@@ -303,7 +303,7 @@ fn initialize_state_with_validators<T: EthSpec>(
     }
 
     // Now that we have our validators, initialize the caches (including the committees)
-    state.build_all_caches(spec).unwrap();
+    state.build_caches(spec).unwrap();
 
     // Set genesis validators root for domain separation and chain versioning
     *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap();
diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs
index 49d1dd424da..e3b2a5acbfd 100644
--- a/lcli/src/skip_slots.rs
+++ b/lcli/src/skip_slots.rs
@@ -109,7 +109,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
     let target_slot = initial_slot + slots;
 
     state
-        .build_all_caches(spec)
+        .build_caches(spec)
         .map_err(|e| format!("Unable to build caches: {:?}", e))?;
 
     let state_root = if let Some(root) = cli_state_root.or(state_root) {
diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs
index cf971c69f00..34a45607610 100644
--- a/lcli/src/transition_blocks.rs
+++ b/lcli/src/transition_blocks.rs
@@ -205,7 +205,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
 
     if config.exclude_cache_builds {
         pre_state
-            .build_all_caches(spec)
+            .build_caches(spec)
            .map_err(|e| format!("Unable to build caches: {:?}", e))?;
         let state_root = pre_state
             .update_tree_hash_cache()
@@ -303,7 +303,7 @@ fn do_transition<T: EthSpec>(
     if !config.exclude_cache_builds {
         let t = Instant::now();
         pre_state
-            .build_all_caches(spec)
+            .build_caches(spec)
            .map_err(|e| format!("Unable to build caches: {:?}", e))?;
         debug!("Build caches: {:?}", t.elapsed());
 
@@ -335,7 +335,7 @@ fn do_transition<T: EthSpec>(
     let t = Instant::now();
     pre_state
-        .build_all_caches(spec)
+        .build_caches(spec)
        .map_err(|e| format!("Unable to build caches: {:?}", e))?;
     debug!("Build all caches (again): {:?}", t.elapsed());
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 65d7bd08b28..ac0780015f6 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -16,7 +16,10 @@ use std::str::FromStr;
 use std::string::ToString;
 use std::time::Duration;
 use tempfile::TempDir;
-use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec};
+use types::{
+    Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec,
+    ProgressiveBalancesMode,
+};
 use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port};
 
 const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/";
 
@@ -2284,3 +2287,28 @@ fn invalid_gossip_verified_blocks_path() {
         )
     });
 }
+
+#[test]
+fn progressive_balances_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.progressive_balances_mode,
+                ProgressiveBalancesMode::Checked
+            )
+        });
+}
+
+#[test]
+fn progressive_balances_fast() {
+    CommandLineTest::new()
+        .flag("progressive-balances", Some("fast"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.progressive_balances_mode,
+                ProgressiveBalancesMode::Fast
+            )
+        });
+}
diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs
index 6095e1be6b1..31542ba4476 100644
--- a/testing/ef_tests/src/cases/epoch_processing.rs
+++ b/testing/ef_tests/src/cases/epoch_processing.rs
@@ -6,9 +6,9 @@ use crate::type_name;
 use crate::type_name::TypeName;
 use serde_derive::Deserialize;
 use state_processing::per_epoch_processing::capella::process_historical_summaries_update;
+use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates;
 use state_processing::per_epoch_processing::{
     altair, base,
-    effective_balance_updates::process_effective_balance_updates,
     historical_roots_update::process_historical_roots_update,
     process_registry_updates, process_slashings,
     resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset},
@@ -173,7 +173,7 @@ impl<E: EthSpec> EpochTransition<E> for Eth1DataReset {
 
 impl<E: EthSpec> EpochTransition<E> for EffectiveBalanceUpdates {
     fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> {
-        process_effective_balance_updates(state, spec)
+        process_effective_balance_updates(state, None, spec)
     }
 }
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs
index 65528de1757..9627d2cde03 100644
--- a/testing/ef_tests/src/cases/fork_choice.rs
+++ b/testing/ef_tests/src/cases/fork_choice.rs
@@ -18,7 +18,8 @@ use std::sync::Arc;
 use std::time::Duration;
 use types::{
     Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec,
-    ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256,
+    ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode,
+    SignedBeaconBlock, Slot, Uint256,
 };
 
 #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)]
@@ -440,7 +441,9 @@ impl<E: EthSpec> Tester<E> {
             block_delay,
             &state,
             PayloadVerificationStatus::Irrelevant,
+            ProgressiveBalancesMode::Strict,
             &self.harness.chain.spec,
+            self.harness.logger(),
         );
 
         if result.is_ok() {
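The ef_tests wiring above passes `ProgressiveBalancesMode::Strict` into block import so any cache divergence fails loudly under the spec tests. A sketch of how a consumer might act on the mode (hypothetical function; the real fork choice wiring lives in `beacon_chain`, outside these hunks):

```rust
// Pick the fast value, optionally cross-check it, and let the mode decide whether
// a mismatch is fatal (`Strict`) or merely a fallback (`Checked`). Assumes the
// `ProgressiveBalancesMode` enum from this patch is in scope.
fn unrealized_balance(
    mode: ProgressiveBalancesMode,
    progressive_value: u64,
    recomputed_value: u64,
) -> Result<u64, &'static str> {
    match mode {
        ProgressiveBalancesMode::Disabled => Ok(recomputed_value),
        ProgressiveBalancesMode::Fast => Ok(progressive_value),
        ProgressiveBalancesMode::Checked | ProgressiveBalancesMode::Strict => {
            if progressive_value == recomputed_value {
                Ok(progressive_value)
            } else if mode == ProgressiveBalancesMode::Strict {
                Err("progressive balances mismatch")
            } else {
                Ok(recomputed_value) // Checked: fall back to the slow path
            }
        }
    }
}

fn main() {
    assert_eq!(unrealized_balance(ProgressiveBalancesMode::Checked, 1, 2), Ok(2));
    assert!(unrealized_balance(ProgressiveBalancesMode::Strict, 1, 2).is_err());
}
```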
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index 5fd00285aaa..21a56dcf2a7 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -4,6 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches;
 use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
 use crate::testing_spec;
 use serde_derive::Deserialize;
+use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
 use state_processing::{
     per_block_processing::{
         errors::BlockProcessingError,
@@ -96,6 +97,7 @@ impl<E: EthSpec> Operation<E> for Attestation<E> {
                 spec,
             ),
             BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                initialize_progressive_balances_cache(state, None, spec)?;
                 altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec)
             }
         }
@@ -118,6 +120,7 @@ impl<E: EthSpec> Operation<E> for AttesterSlashing<E> {
         _: &Operations<E, Self>,
     ) -> Result<(), BlockProcessingError> {
         let mut ctxt = ConsensusContext::new(state.slot());
+        initialize_progressive_balances_cache(state, None, spec)?;
         process_attester_slashings(
             state,
             &[self.clone()],
@@ -168,6 +171,7 @@ impl<E: EthSpec> Operation<E> for ProposerSlashing {
         _: &Operations<E, Self>,
     ) -> Result<(), BlockProcessingError> {
         let mut ctxt = ConsensusContext::new(state.slot());
+        initialize_progressive_balances_cache(state, None, spec)?;
         process_proposer_slashings(
             state,
             &[self.clone()],
diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs
index e51fed1907f..191b45c33a1 100644
--- a/testing/ef_tests/src/cases/sanity_blocks.rs
+++ b/testing/ef_tests/src/cases/sanity_blocks.rs
@@ -67,7 +67,7 @@ impl<E: EthSpec> Case for SanityBlocks<E> {
         let spec = &testing_spec::<E>(fork_name);
 
         // Processing requires the epoch cache.
-        bulk_state.build_all_caches(spec).unwrap();
+        bulk_state.build_caches(spec).unwrap();
 
         // Spawning a second state to call the VerifyIndiviual strategy to avoid bitrot.
         // See https://github.com/sigp/lighthouse/issues/742.
diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs
index a38a8930a0e..dd385d13f4e 100644
--- a/testing/ef_tests/src/cases/sanity_slots.rs
+++ b/testing/ef_tests/src/cases/sanity_slots.rs
@@ -61,7 +61,7 @@ impl<E: EthSpec> Case for SanitySlots<E> {
         let spec = &testing_spec::<E>(fork_name);
 
         // Processing requires the epoch cache.
-        state.build_all_caches(spec).unwrap();
+        state.build_caches(spec).unwrap();
 
         let mut result = (0..self.slots)
             .try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ()))