From f0991ee58a942b934afa7138bd48f335549548cc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 26 Aug 2020 19:39:46 +1000 Subject: [PATCH 01/34] Experimental slasher implementation --- Cargo.lock | 50 ++ Cargo.toml | 2 + beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 1 + .../src/attestation_verification.rs | 254 +++++++-- beacon_node/beacon_chain/src/beacon_chain.rs | 41 +- beacon_node/beacon_chain/src/builder.rs | 10 + beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/builder.rs | 33 ++ beacon_node/client/src/config.rs | 2 + beacon_node/src/cli.rs | 20 + beacon_node/src/config.rs | 9 + beacon_node/src/lib.rs | 30 + .../state_processing/src/verify_operation.rs | 4 + lighthouse/src/main.rs | 12 +- slasher/Cargo.toml | 32 ++ slasher/benches/blake2b.rs | 32 ++ slasher/src/array.rs | 513 ++++++++++++++++++ slasher/src/attestation_queue.rs | 90 +++ slasher/src/config.rs | 79 +++ slasher/src/database.rs | 196 +++++++ slasher/src/error.rs | 62 +++ slasher/src/lib.rs | 45 ++ slasher/src/slasher.rs | 168 ++++++ slasher/src/slasher_server.rs | 64 +++ slasher/tests/slasher_tests.rs | 236 ++++++++ 26 files changed, 1933 insertions(+), 54 deletions(-) create mode 100644 slasher/Cargo.toml create mode 100644 slasher/benches/blake2b.rs create mode 100644 slasher/src/array.rs create mode 100644 slasher/src/attestation_queue.rs create mode 100644 slasher/src/config.rs create mode 100644 slasher/src/database.rs create mode 100644 slasher/src/error.rs create mode 100644 slasher/src/lib.rs create mode 100644 slasher/src/slasher.rs create mode 100644 slasher/src/slasher_server.rs create mode 100644 slasher/tests/slasher_tests.rs diff --git a/Cargo.lock b/Cargo.lock index 878ee69468d..1c9a67d67b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -424,6 +424,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_yaml", + "slasher", "slog", "slog-term", "sloggers", @@ -465,6 +466,7 @@ dependencies = [ "node_test_rig", "rand 0.7.3", "serde", + "slasher", "slog", "slog-async", "slog-term", @@ -894,6 +896,7 @@ dependencies = [ "serde", "serde_derive", "serde_yaml", + "slasher", "slog", "slog-async", "sloggers", @@ -3223,6 +3226,28 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +[[package]] +name = "lmdb" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0908efb5d6496aa977d96f91413da2635a902e5e31dbef0bfb88986c248539" +dependencies = [ + "bitflags 1.2.1", + "libc", + "lmdb-sys", +] + +[[package]] +name = "lmdb-sys" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5b392838cfe8858e86fac37cf97a0e8c55cc60ba0a18365cadc33092f128ce9" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "lock_api" version = "0.3.4" @@ -5090,6 +5115,31 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +[[package]] +name = "slasher" +version = "0.1.0" +dependencies = [ + "bincode", + "blake2b_simd", + "byte-slice-cast", + "criterion", + "environment", + "eth2_ssz", + "lmdb", + "parking_lot 0.11.0", + "rand 0.7.3", + "safe_arith", + "serde", + "serde_derive", + "slog", + "slog-term", + "slot_clock", + "tempdir", + "tokio 0.2.22", + "tree_hash", + "types", +] + [[package]] name = "slashing_protection" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 
d15b23be682..089e09fb077 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,8 @@ members = [ "lighthouse", "lighthouse/environment", + "slasher", + "testing/simulator", "testing/ef_tests", "testing/eth1_test_rig", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 57a3fd97775..97a2f895c90 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -43,3 +43,4 @@ clap_utils = { path = "../common/clap_utils" } hyper = "0.13.8" lighthouse_version = { path = "../common/lighthouse_version" } hex = "0.4.2" +slasher = { path = "../slasher" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index b249c172820..cb39f4b1ec7 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -61,3 +61,4 @@ derivative = "2.1.1" itertools = "0.9.0" regex = "1.3.9" exit-future = "0.2.0" +slasher = { path = "../../slasher" } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 39436bcdb71..4c217c7df48 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -297,6 +297,61 @@ impl SignatureVerifiedAttestation for VerifiedUnaggregat } } +/// Information about invalid attestations which might still be slashable despite being invalid. +pub enum AttestationSlashInfo { + /// The attestation is invalid, but its signature wasn't checked. + SignatureNotChecked(Attestation, TErr), + /// As for `SignatureNotChecked`, but we know the `IndexedAttestation`. + SignatureNotCheckedIndexed(IndexedAttestation, TErr), + /// The attestation's signature is invalid, so it will never be slashable. + SignatureInvalid(TErr), + /// The signature is valid but the attestation is invalid in some other way. + SignatureValid(IndexedAttestation, TErr), +} + +fn process_slash_info( + slash_info: AttestationSlashInfo, + chain: &BeaconChain, +) -> Error { + use AttestationSlashInfo::*; + + if let Some(slasher) = chain.slasher.as_ref() { + let (indexed_attestation, err) = match slash_info { + // TODO(sproul): check signatures + // TODO: de-duplicate by attestation hash? + SignatureNotChecked(attestation, err) => { + match obtain_indexed_attestation_and_committees_per_slot(chain, &attestation) { + Ok((indexed, _)) => (indexed, err), + Err(e) => { + debug!( + chain.log, + "Unable to obtain indexed form of attestation for slasher"; + "attestation_root" => format!("{:?}", attestation.tree_hash_root()), + "error" => format!("{:?}", e) + ); + return err; + } + } + } + SignatureNotCheckedIndexed(indexed, err) => (indexed, err), + SignatureInvalid(e) => return e, + SignatureValid(indexed, err) => (indexed, err), + }; + + // Supply to slasher. + slasher.accept_attestation(indexed_attestation); + + err + } else { + match slash_info { + SignatureNotChecked(_, e) + | SignatureNotCheckedIndexed(_, e) + | SignatureInvalid(e) + | SignatureValid(_, e) => e, + } + } +} + impl VerifiedAggregatedAttestation { /// Returns `Ok(Self)` if the `signed_aggregate` is valid to be (re)published on the gossip /// network. 
@@ -304,6 +359,14 @@ impl VerifiedAggregatedAttestation { signed_aggregate: SignedAggregateAndProof, chain: &BeaconChain, ) -> Result { + Self::verify_slashable(signed_aggregate, chain) + .map_err(|slash_info| process_slash_info(slash_info, chain)) + } + + fn verify_early_checks( + signed_aggregate: &SignedAggregateAndProof, + chain: &BeaconChain, + ) -> Result { let attestation = &signed_aggregate.message.aggregate; // Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a @@ -362,37 +425,19 @@ impl VerifiedAggregatedAttestation { // Ensure that the attestation has participants. if attestation.aggregation_bits.is_zero() { - return Err(Error::EmptyAggregationBitfield); + Err(Error::EmptyAggregationBitfield) + } else { + Ok(attestation_root) } + } - let indexed_attestation = - map_attestation_committee(chain, attestation, |(committee, _)| { - // Note: this clones the signature which is known to be a relatively slow operation. - // - // Future optimizations should remove this clone. - let selection_proof = - SelectionProof::from(signed_aggregate.message.selection_proof.clone()); - - if !selection_proof - .is_aggregator(committee.committee.len(), &chain.spec) - .map_err(|e| Error::BeaconChainError(e.into()))? - { - return Err(Error::InvalidSelectionProof { aggregator_index }); - } - - // Ensure the aggregator is a member of the committee for which it is aggregating. - if !committee.committee.contains(&(aggregator_index as usize)) { - return Err(Error::AggregatorNotInCommittee { aggregator_index }); - } - - get_indexed_attestation(committee.committee, &attestation) - .map_err(|e| BeaconChainError::from(e).into()) - })?; - - // Ensure that all signatures are valid. - if !verify_signed_aggregate_signatures(chain, &signed_aggregate, &indexed_attestation)? { - return Err(Error::InvalidSignature); - } + fn verify_late_checks( + signed_aggregate: &SignedAggregateAndProof, + attestation_root: Hash256, + chain: &BeaconChain, + ) -> Result<(), Error> { + let attestation = &signed_aggregate.message.aggregate; + let aggregator_index = signed_aggregate.message.aggregator_index; // Observe the valid attestation so we do not re-process it. // @@ -412,7 +457,7 @@ impl VerifiedAggregatedAttestation { // attestations processed at the same time could be published. if chain .observed_aggregators - .observe_validator(&attestation, aggregator_index as usize) + .observe_validator(attestation, aggregator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { @@ -421,6 +466,68 @@ impl VerifiedAggregatedAttestation { }); } + Ok(()) + } + + // TODO(sproul): naming + pub fn verify_slashable( + signed_aggregate: SignedAggregateAndProof, + chain: &BeaconChain, + ) -> Result> { + use AttestationSlashInfo::*; + + let attestation = &signed_aggregate.message.aggregate; + let aggregator_index = signed_aggregate.message.aggregator_index; + let attestation_root = match Self::verify_early_checks(&signed_aggregate, chain) { + Ok(root) => root, + Err(e) => return Err(SignatureNotChecked(signed_aggregate.message.aggregate, e)), + }; + + let indexed_attestation = + match map_attestation_committee(chain, attestation, |(committee, _)| { + // Note: this clones the signature which is known to be a relatively slow operation. + // + // Future optimizations should remove this clone. 
+ let selection_proof = + SelectionProof::from(signed_aggregate.message.selection_proof.clone()); + + if !selection_proof + .is_aggregator(committee.committee.len(), &chain.spec) + .map_err(|e| Error::BeaconChainError(e.into()))? + { + return Err(Error::InvalidSelectionProof { aggregator_index }); + } + + // Ensure the aggregator is a member of the committee for which it is aggregating. + if !committee.committee.contains(&(aggregator_index as usize)) { + return Err(Error::AggregatorNotInCommittee { aggregator_index }); + } + + get_indexed_attestation(committee.committee, attestation) + .map_err(|e| BeaconChainError::from(e).into()) + }) { + Ok(indexed_attestation) => indexed_attestation, + Err(e) => return Err(SignatureNotChecked(signed_aggregate.message.aggregate, e)), + }; + + // Ensure that all signatures are valid. + if let Err(e) = + verify_signed_aggregate_signatures(chain, &signed_aggregate, &indexed_attestation) + .and_then(|is_valid| { + if !is_valid { + Err(Error::InvalidSignature) + } else { + Ok(()) + } + }) + { + return Err(SignatureInvalid(e)); + } + + if let Err(e) = Self::verify_late_checks(&signed_aggregate, attestation_root, chain) { + return Err(SignatureValid(indexed_attestation, e)); + } + Ok(VerifiedAggregatedAttestation { signed_aggregate, indexed_attestation, @@ -444,16 +551,10 @@ impl VerifiedAggregatedAttestation { } impl VerifiedUnaggregatedAttestation { - /// Returns `Ok(Self)` if the `attestation` is valid to be (re)published on the gossip - /// network. - /// - /// `subnet_id` is the subnet from which we received this attestation. This function will - /// verify that it was received on the correct subnet. - pub fn verify( - attestation: Attestation, - subnet_id: Option, + pub fn verify_early_checks( + attestation: &Attestation, chain: &BeaconChain, - ) -> Result { + ) -> Result<(), Error> { let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); // Check the attestation's epoch matches its target. @@ -487,9 +588,16 @@ impl VerifiedUnaggregatedAttestation { // Check the attestation target root is consistent with the head root. verify_attestation_target_root::(&head_block, &attestation)?; - let (indexed_attestation, committees_per_slot) = - obtain_indexed_attestation_and_committees_per_slot(chain, &attestation)?; + Ok(()) + } + pub fn verify_middle_checks( + attestation: &Attestation, + indexed_attestation: &IndexedAttestation, + committees_per_slot: u64, + subnet_id: Option, + chain: &BeaconChain, + ) -> Result { let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::( &indexed_attestation.data, committees_per_slot, @@ -527,9 +635,14 @@ impl VerifiedUnaggregatedAttestation { }); } - // The aggregate signature of the attestation is valid. - verify_attestation_signature(chain, &indexed_attestation)?; + Ok(validator_index) + } + fn verify_late_checks( + attestation: &Attestation, + validator_index: u64, + chain: &BeaconChain, + ) -> Result<(), Error> { // Now that the attestation has been fully verified, store that we have received a valid // attestation from this validator. // @@ -546,6 +659,61 @@ impl VerifiedUnaggregatedAttestation { epoch: attestation.data.target.epoch, }); } + Ok(()) + } + + /// Returns `Ok(Self)` if the `attestation` is valid to be (re)published on the gossip + /// network. + /// + /// `subnet_id` is the subnet from which we received this attestation. This function will + /// verify that it was received on the correct subnet. 
+ pub fn verify( + attestation: Attestation, + subnet_id: Option, + chain: &BeaconChain, + ) -> Result { + Self::verify_slashable(attestation, subnet_id, chain) + .map_err(|slash_info| process_slash_info(slash_info, chain)) + } + + pub fn verify_slashable( + attestation: Attestation, + subnet_id: SubnetId, + chain: &BeaconChain, + ) -> Result> { + use AttestationSlashInfo::*; + + if let Err(e) = Self::verify_early_checks(&attestation, chain) { + return Err(SignatureNotChecked(attestation, e)); + } + + let (indexed_attestation, committees_per_slot) = + match obtain_indexed_attestation_and_committees_per_slot(chain, &attestation) { + Ok(x) => x, + Err(e) => { + return Err(SignatureNotChecked(attestation, e)); + } + }; + + let validator_index = match Self::verify_middle_checks( + &attestation, + &indexed_attestation, + committees_per_slot, + subnet_id, + chain, + ) { + Ok(idx) => idx, + Err(e) => return Err(SignatureNotCheckedIndexed(indexed_attestation, e)), + }; + + // The aggregate signature of the attestation is valid. + if let Err(e) = verify_attestation_signature(chain, &indexed_attestation) { + return Err(SignatureInvalid(e)); + } + + if let Err(e) = Self::verify_late_checks(&attestation, validator_index, chain) { + return Err(SignatureValid(indexed_attestation, e)); + } Ok(Self { attestation, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1af92df7fa7..109671f33d4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -32,12 +32,14 @@ use futures::channel::mpsc::Sender; use itertools::process_results; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; +use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ common::get_indexed_attestation, per_block_processing, - per_block_processing::errors::AttestationValidationError, per_slot_processing, - BlockSignatureStrategy, SigVerifiedOp, + per_block_processing::errors::AttestationValidationError, + per_block_processing::verify_attester_slashing, per_slot_processing, BlockSignatureStrategy, + SigVerifiedOp, VerifySignatures, }; use std::borrow::Cow; use std::cmp::Ordering; @@ -232,6 +234,8 @@ pub struct BeaconChain { pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. pub(crate) graffiti: Graffiti, + /// Optional slasher. + pub(crate) slasher: Option>>, } type BeaconBlockAndState = (BeaconBlock, BeaconState); @@ -1084,6 +1088,38 @@ impl BeaconChain { Ok(signed_aggregate) } + fn ingest_slashings_to_op_pool(&self, state: &BeaconState) { + if let Some(slasher) = self.slasher.as_ref() { + let slashings = slasher.get_attester_slashings(); + debug!(self.log, "Ingesting {} slashings", slashings.len()); + for slashing in slashings { + if let Err(e) = + verify_attester_slashing(state, &slashing, VerifySignatures::True, &self.spec) + { + error!( + self.log, + "Slashing from slasher failed verification"; + "error" => format!("{:?}", e), + "slashing" => format!("{:?}", slashing), + ); + continue; + } + + // FIXME(sproul): remove `trust_me` + if let Err(e) = + self.import_attester_slashing(SigVerifiedOp::trust_me(slashing.clone())) + { + error!( + self.log, + "Slashing from slasher is invalid"; + "error" => format!("{:?}", e), + "slashing" => format!("{:?}", slashing), + ); + } + } + } + } + /// Check that the shuffling at `block_root` is equal to one of the shufflings of `state`. 
/// /// The `target_epoch` argument determines which shuffling to check compatibility with, it @@ -1730,6 +1766,7 @@ impl BeaconChain { state.latest_block_header.canonical_root() }; + self.ingest_slashings_to_op_pool(&state); let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&state); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index a251ced2d2f..8edcf4d316e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -21,6 +21,7 @@ use fork_choice::ForkChoice; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; +use slasher::Slasher; use slog::{info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; use std::marker::PhantomData; @@ -118,6 +119,7 @@ pub struct BeaconChainBuilder { disabled_forks: Vec, log: Option, graffiti: Graffiti, + slasher: Option>>, } impl @@ -166,6 +168,7 @@ where chain_config: ChainConfig::default(), log: None, graffiti: Graffiti::default(), + slasher: None, } } @@ -201,6 +204,12 @@ where self } + /// Sets the slasher. + pub fn slasher(mut self, slasher: Arc>) -> Self { + self.slasher = Some(slasher); + self + } + /// Sets the logger. /// /// Should generally be called early in the build chain. @@ -595,6 +604,7 @@ where .ok_or_else(|| "Cannot build without a shutdown sender.".to_string())?, log: log.clone(), graffiti: self.graffiti, + slasher: self.slasher.clone(), }; let head = beacon_chain diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8114761aaed..e9a3469e54e 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -44,3 +44,4 @@ bus = "2.2.3" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } +slasher = { path = "../../slasher" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 97d68f407b0..8040cafb4c8 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -17,6 +17,8 @@ use eth2_libp2p::NetworkGlobals; use genesis::{interop_genesis_state, Eth1GenesisService}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use parking_lot::Mutex; +use slasher::Slasher; +use slasher::SlasherServer; use slog::{debug, info}; use ssz::Decode; use std::net::SocketAddr; @@ -65,6 +67,7 @@ pub struct ClientBuilder { http_api_config: http_api::Config, http_metrics_config: http_metrics::Config, websocket_listen_addr: Option, + slasher: Option>>, eth_spec_instance: T::EthSpec, } @@ -110,6 +113,7 @@ where http_api_config: <_>::default(), http_metrics_config: <_>::default(), websocket_listen_addr: None, + slasher: None, eth_spec_instance, } } @@ -126,6 +130,11 @@ where self } + pub fn slasher(mut self, slasher: Arc>) -> Self { + self.slasher = Some(slasher); + self + } + /// Initializes the `BeaconChainBuilder`. The `build_beacon_chain` method will need to be /// called later in order to actually instantiate the `BeaconChain`. 
pub async fn beacon_chain_builder( @@ -163,6 +172,12 @@ where .disabled_forks(disabled_forks) .graffiti(graffiti); + let builder = if let Some(slasher) = self.slasher.clone() { + builder.slasher(slasher) + } else { + builder + }; + let chain_exists = builder .store_contains_beacon_chain() .unwrap_or_else(|_| false); @@ -297,6 +312,24 @@ where self } + pub fn slasher_server(self) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "slasher requires a runtime_context")? + .service_context("slasher_server_ctxt".into()); + let slasher = self + .slasher + .clone() + .ok_or_else(|| "slasher server requires a slasher")?; + let slot_clock = self + .slot_clock + .clone() + .ok_or_else(|| "slasher server requires a slot clock")?; + SlasherServer::new(slasher, slot_clock, &context.executor); + Ok(self) + } + /// Immediately starts the service that periodically logs information each slot. pub fn notifier(self) -> Result { let context = self diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 0cf90d6b45d..a689722bcf2 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -67,6 +67,7 @@ pub struct Config { pub eth1: eth1::Config, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, + pub slasher: Option, } impl Default for Config { @@ -89,6 +90,7 @@ impl Default for Config { graffiti: Graffiti::default(), http_api: <_>::default(), http_metrics: <_>::default(), + slasher: None, } } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index cb7ea121dc3..2941fabf55b 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -319,6 +319,26 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("NUM_SLOTS") .takes_value(true) .default_value("700") + ) + /* + * Slasher. + */ + .arg( + Arg::with_name("slasher") + .long("slasher") + .help( + "Run a slasher alongside the beacon node [EXPERIMENTAL]." + ) + .takes_value(false) + ) + .arg( + Arg::with_name("slasher-dir") + .long("slasher-dir") + .help( + "Set the slasher's database directory." + ) + .value_name("DIR") + .takes_value(true) ) .arg( Arg::with_name("wss-checkpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 070c9973491..408a8b11b82 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -348,6 +348,15 @@ pub fn get_config( }; } + if cli_args.is_present("slasher") { + let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { + PathBuf::from(slasher_dir) + } else { + client_config.data_dir.join("slasher_db") + }; + client_config.slasher = Some(slasher::Config::new(slasher_dir)); + } + Ok(client_config) } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 86592cfc7a1..87197eef58d 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -18,8 +18,10 @@ use beacon_chain::{ }; use clap::ArgMatches; use environment::RuntimeContext; +use slasher::Slasher; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; +use std::sync::Arc; use types::EthSpec; /// A type-alias to the tighten the definition of a production-intended `Client`. @@ -88,6 +90,16 @@ impl ProductionBeaconNode { .disk_store(&db_path, &freezer_db_path_res?, store_config)? 
.background_migrator()?; + let builder = if let Some(slasher_config) = client_config.slasher.clone() { + let slasher = Arc::new( + Slasher::open(slasher_config, log.new(slog::o!("service" => "slasher"))) + .map_err(|e| format!("Slasher open error: {:?}", e))?, + ); + builder.slasher(slasher) + } else { + builder + }; + let builder = builder .beacon_chain_builder(client_genesis, client_config_1) .await?; @@ -129,11 +141,19 @@ impl ProductionBeaconNode { .build_beacon_chain()? .network(&client_config.network) .await? - .notifier()? + .notifier()?; + + let builder = if client_config.slasher.is_some() { + builder.slasher_server()? + } else { + builder + }; + + builder .http_api_config(client_config.http_api.clone()) .http_metrics_config(client_config.http_metrics.clone()) .build() .map(Self) } pub fn into_inner(self) -> ProductionClient { diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 6cc66aa814b..499802241fa 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -17,6 +17,10 @@ use types::{ pub struct SigVerifiedOp(T); impl SigVerifiedOp { + pub fn trust_me(t: T) -> Self { + SigVerifiedOp(t) + } + pub fn into_inner(self) -> T { self.0 } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 376381c27d3..6cae1829d8f 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -142,16 +142,10 @@ fn main() { Builder::from_env(Env::default()).init(); } - macro_rules!
run_with_spec { - ($env_builder: expr) => { - run($env_builder, &matches) - }; - } - let result = match matches.value_of("spec") { - Some("minimal") => run_with_spec!(EnvironmentBuilder::minimal()), - Some("mainnet") => run_with_spec!(EnvironmentBuilder::mainnet()), - Some("interop") => run_with_spec!(EnvironmentBuilder::interop()), + Some("minimal") => run(EnvironmentBuilder::minimal(), &matches), + Some("mainnet") => run(EnvironmentBuilder::mainnet(), &matches), + Some("interop") => run(EnvironmentBuilder::interop(), &matches), spec => { // This path should be unreachable due to slog having a `default_value` unreachable!("Unknown spec configuration: {:?}", spec); diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml new file mode 100644 index 00000000000..a0e96e3ecd2 --- /dev/null +++ b/slasher/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "slasher" +version = "0.1.0" +authors = ["Michael Sproul "] +edition = "2018" + +[dependencies] +slot_clock = { path = "../common/slot_clock" } +bincode = "1.3.1" +blake2b_simd = "0.5.10" +byte-slice-cast = "0.3.5" +environment = { path = "../lighthouse/environment" } +eth2_ssz = { path = "../consensus/ssz" } +lmdb = "0.8" +parking_lot = "0.11.0" +rand = "0.7" +safe_arith = { path = "../consensus/safe_arith" } +serde = "1.0" +serde_derive = "1.0" +slog = "2.5.2" +tokio = { version = "0.2.21", features = ["full"] } +tree_hash = { path = "../consensus/tree_hash" } +types = { path = "../consensus/types" } + +[dev-dependencies] +criterion = "0.3" +tempdir = "0.3.7" +slog-term = "2.6.0" + +[[bench]] +name = "blake2b" +harness = false diff --git a/slasher/benches/blake2b.rs b/slasher/benches/blake2b.rs new file mode 100644 index 00000000000..8bdfdc6db3c --- /dev/null +++ b/slasher/benches/blake2b.rs @@ -0,0 +1,32 @@ +use blake2b_simd::{Hash, Params}; +use byte_slice_cast::AsByteSlice; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand::{thread_rng, Rng}; + +const CHUNK_SIZE: usize = 2048; +type Chunk = [u16; CHUNK_SIZE]; + +fn blake2b(data: &Chunk) -> Hash { + let mut params = Params::new(); + params.hash_length(16); + params.hash(data.as_byte_slice()) +} + +fn make_random_chunk() -> Chunk { + let mut chunk = [0; CHUNK_SIZE]; + thread_rng().fill(&mut chunk[..]); + chunk +} + +pub fn uniform_chunk(c: &mut Criterion) { + let chunk = [33; CHUNK_SIZE]; + c.bench_function("uniform_chunk", |b| b.iter(|| blake2b(&black_box(chunk)))); +} + +pub fn random_chunk(c: &mut Criterion) { + let chunk = make_random_chunk(); + c.bench_function("random_chunk", |b| b.iter(|| blake2b(&black_box(chunk)))); +} + +criterion_group!(benches, uniform_chunk, random_chunk); +criterion_main!(benches); diff --git a/slasher/src/array.rs b/slasher/src/array.rs new file mode 100644 index 00000000000..c2d1a03f966 --- /dev/null +++ b/slasher/src/array.rs @@ -0,0 +1,513 @@ +use crate::{Config, Error, SlasherDB, SlashingStatus}; +use lmdb::{RwTransaction, Transaction}; +use safe_arith::SafeArith; +use serde_derive::{Deserialize, Serialize}; +use std::collections::{btree_map::Entry, BTreeMap}; +use std::convert::TryFrom; +use std::sync::Arc; +use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; + +pub const MAX_DISTANCE: u16 = u16::MAX; + +/// Terminology: +/// +/// Let +/// N = config.history_length +/// C = config.chunk_size +/// K = config.validator_chunk_size +/// +/// Then +/// +/// `chunk_index` in [0..N/C) is the column of a chunk in the 2D matrix +/// `validator_chunk_index` in [0..N/K) is the row of a chunk in the 2D matrix +/// 
`chunk_offset` in [0..C) is the horizontal (epoch) offset of a value within a 2D chunk +/// `validator_offset` in [0..K) is the vertical (validator) offset of a value within a 2D chunk +#[derive(Debug, Serialize, Deserialize)] +pub struct Chunk { + data: Vec, +} + +impl Chunk { + // TODO: write tests for epochs greater than length + pub fn get_target( + &self, + validator_index: u64, + epoch: Epoch, + config: &Config, + ) -> Result { + assert_eq!( + self.data.len(), + config.chunk_size * config.validator_chunk_size + ); + let validator_offset = config.validator_offset(validator_index); + let chunk_offset = config.chunk_offset(epoch); + let cell_index = config.cell_index(validator_offset, chunk_offset); + self.data + .get(cell_index) + .map(|distance| epoch + u64::from(*distance)) + .ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index)) + } + + pub fn set_target( + &mut self, + validator_index: u64, + epoch: Epoch, + target_epoch: Epoch, + config: &Config, + ) -> Result<(), Error> { + let validator_offset = config.validator_offset(validator_index); + let chunk_offset = config.chunk_offset(epoch); + let cell_index = config.cell_index(validator_offset, chunk_offset); + + let cell = self + .data + .get_mut(cell_index) + .ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))?; + + *cell = Self::epoch_distance(target_epoch, epoch)?; + Ok(()) + } + + /// Compute the distance (difference) between two epochs. + /// + /// Error if the distance is greater than or equal to `MAX_DISTANCE`. + pub fn epoch_distance(epoch: Epoch, base_epoch: Epoch) -> Result { + let distance_u64 = epoch + .as_u64() + .checked_sub(base_epoch.as_u64()) + .ok_or(Error::DistanceCalculationOverflow)?; + + let distance = u16::try_from(distance_u64).map_err(|_| Error::DistanceTooLarge)?; + if distance < MAX_DISTANCE { + Ok(distance) + } else { + Err(Error::DistanceTooLarge) + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct MinTargetChunk { + chunk: Chunk, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct MaxTargetChunk { + chunk: Chunk, +} + +pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwned { + fn empty(config: &Config) -> Self; + + fn check_slashable( + &self, + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_index: u64, + attestation: &IndexedAttestation, + config: &Config, + ) -> Result, Error>; + + fn update( + &mut self, + chunk_index: usize, + validator_index: u64, + start_epoch: Epoch, + new_target_epoch: Epoch, + current_epoch: Epoch, + config: &Config, + ) -> Result; + + fn first_start_epoch(source_epoch: Epoch, current_epoch: Epoch) -> Option; + + fn next_chunk_index_and_start_epoch( + chunk_index: usize, + start_epoch: Epoch, + config: &Config, + ) -> Result<(usize, Epoch), Error>; + + fn select_db(db: &SlasherDB) -> lmdb::Database; + + fn load( + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_chunk_index: usize, + chunk_index: usize, + config: &Config, + ) -> Result, Error> { + let disk_key = config.disk_key(validator_chunk_index, chunk_index); + match txn.get(Self::select_db(db), &disk_key.to_be_bytes()) { + Ok(chunk_bytes) => Ok(Some(bincode::deserialize(chunk_bytes)?)), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e.into()), + } + } + + fn store( + &self, + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_chunk_index: usize, + chunk_index: usize, + config: &Config, + ) -> Result<(), Error> { + let disk_key = config.disk_key(validator_chunk_index, chunk_index); 
+ let value = bincode::serialize(self)?; + txn.put( + Self::select_db(db), + &disk_key.to_be_bytes(), + &value, + SlasherDB::::write_flags(), + )?; + Ok(()) + } +} + +impl TargetArrayChunk for MinTargetChunk { + fn empty(config: &Config) -> Self { + MinTargetChunk { + chunk: Chunk { + data: vec![MAX_DISTANCE; config.chunk_size * config.validator_chunk_size], + }, + } + } + + fn check_slashable( + &self, + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_index: u64, + attestation: &IndexedAttestation, + config: &Config, + ) -> Result, Error> { + let min_target = + self.chunk + .get_target(validator_index, attestation.data.source.epoch, config)?; + if attestation.data.target.epoch > min_target { + let attestation = db + .get_attestation_for_validator(txn, validator_index, min_target)? + .ok_or_else(|| Error::MissingAttesterRecord { + validator_index, + target_epoch: min_target, + })?; + Ok(SlashingStatus::SurroundsExisting(Box::new(attestation))) + } else { + Ok(SlashingStatus::NotSlashable) + } + } + + fn update( + &mut self, + chunk_index: usize, + validator_index: u64, + start_epoch: Epoch, + new_target_epoch: Epoch, + current_epoch: Epoch, + config: &Config, + ) -> Result { + let min_epoch = Epoch::from( + current_epoch + .as_usize() + .saturating_sub(config.history_length - 1), + ); + let mut epoch = start_epoch; + while config.chunk_index(epoch) == chunk_index { + if new_target_epoch < self.chunk.get_target(validator_index, epoch, config)? { + self.chunk + .set_target(validator_index, epoch, new_target_epoch, config)?; + } else { + // We can stop. + return Ok(false); + } + if epoch == min_epoch { + return Ok(false); + } + epoch -= 1; + } + // Continue to the next chunk. + assert_ne!(chunk_index, 0); + Ok(true) + } + + fn first_start_epoch(source_epoch: Epoch, _current_epoch: Epoch) -> Option { + if source_epoch > 0 { + Some(source_epoch - 1) + } else { + None + } + } + + fn next_chunk_index_and_start_epoch( + chunk_index: usize, + start_epoch: Epoch, + config: &Config, + ) -> Result<(usize, Epoch), Error> { + let chunk_size = config.chunk_size as u64; + Ok(( + chunk_index.safe_sub(1)?, + start_epoch / chunk_size * chunk_size - 1, + )) + } + + fn select_db(db: &SlasherDB) -> lmdb::Database { + db.min_targets_db + } +} + +impl TargetArrayChunk for MaxTargetChunk { + fn empty(config: &Config) -> Self { + MaxTargetChunk { + chunk: Chunk { + data: vec![0; config.chunk_size * config.validator_chunk_size], + }, + } + } + + fn check_slashable( + &self, + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_index: u64, + attestation: &IndexedAttestation, + config: &Config, + ) -> Result, Error> { + let max_target = + self.chunk + .get_target(validator_index, attestation.data.source.epoch, config)?; + if attestation.data.target.epoch < max_target { + let attestation = db + .get_attestation_for_validator(txn, validator_index, max_target)? + .ok_or_else(|| Error::MissingAttesterRecord { + validator_index, + target_epoch: max_target, + })?; + Ok(SlashingStatus::SurroundedByExisting(Box::new(attestation))) + } else { + Ok(SlashingStatus::NotSlashable) + } + } + + fn update( + &mut self, + chunk_index: usize, + validator_index: u64, + start_epoch: Epoch, + new_target_epoch: Epoch, + current_epoch: Epoch, + config: &Config, + ) -> Result { + let mut epoch = start_epoch; + while config.chunk_index(epoch) == chunk_index { + if new_target_epoch > self.chunk.get_target(validator_index, epoch, config)? 
{ + self.chunk + .set_target(validator_index, epoch, new_target_epoch, config)?; + } else { + // We can stop. + return Ok(false); + } + if epoch == current_epoch { + return Ok(false); + } + epoch += 1; + } + // Continue to the next chunk. + Ok(true) + } + + fn first_start_epoch(source_epoch: Epoch, current_epoch: Epoch) -> Option { + if source_epoch < current_epoch { + Some(source_epoch + 1) + } else { + None + } + } + + // Go to next chunk, and first epoch of that chunk + fn next_chunk_index_and_start_epoch( + chunk_index: usize, + start_epoch: Epoch, + config: &Config, + ) -> Result<(usize, Epoch), Error> { + let chunk_size = config.chunk_size as u64; + Ok(( + chunk_index.safe_add(1)?, + (start_epoch / chunk_size + 1) * chunk_size, + )) + } + + fn select_db(db: &SlasherDB) -> lmdb::Database { + db.max_targets_db + } +} + +pub fn get_chunk_for_update<'a, E: EthSpec, T: TargetArrayChunk>( + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + updated_chunks: &'a mut BTreeMap, + validator_chunk_index: usize, + chunk_index: usize, + config: &Config, +) -> Result<&'a mut T, Error> { + Ok(match updated_chunks.entry(chunk_index) { + Entry::Occupied(occupied) => occupied.into_mut(), + Entry::Vacant(vacant) => { + let chunk = if let Some(disk_chunk) = + T::load(db, txn, validator_chunk_index, chunk_index, config)? + { + disk_chunk + } else { + T::empty(config) + }; + vacant.insert(chunk) + } + }) +} + +pub fn apply_attestation_for_validator( + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + updated_chunks: &mut BTreeMap, + validator_chunk_index: usize, + validator_index: u64, + attestation: &IndexedAttestation, + current_epoch: Epoch, + config: &Config, +) -> Result, Error> { + let mut chunk_index = config.chunk_index(attestation.data.source.epoch); + let mut current_chunk = get_chunk_for_update( + db, + txn, + updated_chunks, + validator_chunk_index, + chunk_index, + config, + )?; + + let slashing_status = + current_chunk.check_slashable(db, txn, validator_index, attestation, config)?; + + // TODO: consider removing this early return and updating the array + if slashing_status != SlashingStatus::NotSlashable { + return Ok(slashing_status); + } + + let mut start_epoch = if let Some(start_epoch) = + T::first_start_epoch(attestation.data.source.epoch, current_epoch) + { + start_epoch + } else { + return Ok(slashing_status); + }; + chunk_index = config.chunk_index(start_epoch); + + loop { + current_chunk = get_chunk_for_update( + db, + txn, + updated_chunks, + validator_chunk_index, + chunk_index, + config, + )?; + let keep_going = current_chunk.update( + chunk_index, + validator_index, + start_epoch, + attestation.data.target.epoch, + current_epoch, + config, + )?; + if !keep_going { + break; + } + + let (next_chunk_index, next_start_epoch) = + T::next_chunk_index_and_start_epoch(chunk_index, start_epoch, config)?; + chunk_index = next_chunk_index; + start_epoch = next_start_epoch; + } + + Ok(SlashingStatus::NotSlashable) +} + +pub fn update( + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_chunk_index: usize, + batch: Vec>>, + current_epoch: Epoch, + config: &Config, +) -> Result>, Error> { + // Split the batch up into horizontal segments. + // Map chunk indexes in the range `0..self.config.chunk_size` to attestations + // for those chunks. 
+ let mut chunk_attestations = BTreeMap::new(); + for attestation in batch { + chunk_attestations + .entry(config.chunk_index(attestation.data.source.epoch)) + .or_insert_with(Vec::new) + .push(attestation); + } + + let mut slashings = update_array::<_, MinTargetChunk>( + db, + txn, + validator_chunk_index, + &chunk_attestations, + current_epoch, + config, + )?; + slashings.extend(update_array::<_, MaxTargetChunk>( + db, + txn, + validator_chunk_index, + &chunk_attestations, + current_epoch, + config, + )?); + Ok(slashings) +} + +pub fn update_array( + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + validator_chunk_index: usize, + chunk_attestations: &BTreeMap>>>, + current_epoch: Epoch, + config: &Config, +) -> Result>, Error> { + let mut slashings = vec![]; + // Map from chunk index to updated chunk at that index. + let mut updated_chunks = BTreeMap::new(); + + for attestations in chunk_attestations.values() { + for attestation in attestations { + for validator_index in + config.attesting_validators_for_chunk(attestation, validator_chunk_index) + { + let slashing_status = apply_attestation_for_validator::( + db, + txn, + &mut updated_chunks, + validator_chunk_index, + validator_index, + attestation, + current_epoch, + config, + )?; + if let Some(slashing) = slashing_status.into_slashing(attestation) { + slashings.push(slashing); + } + } + } + } + + // Store chunks on disk. + for (chunk_index, chunk) in updated_chunks { + chunk.store(db, txn, validator_chunk_index, chunk_index, config)?; + } + + Ok(slashings) +} diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs new file mode 100644 index 00000000000..5ed9adb3328 --- /dev/null +++ b/slasher/src/attestation_queue.rs @@ -0,0 +1,90 @@ +use parking_lot::{Mutex, RwLock}; +use std::collections::BTreeSet; +use std::sync::Arc; +use types::{EthSpec, IndexedAttestation}; + +/// Staging area for attestations received from the network. +/// +/// To be added to the database in batches, for efficiency and to prevent data races. +#[derive(Debug)] +pub struct AttestationQueue { + /// All attestations (unique) for storage on disk. + attestations_to_store: Mutex>>>, + /// Attestations grouped by validator index range. + pub(crate) subqueues: RwLock>>, + pub(crate) validators_per_chunk: usize, +} + +/// A queue of attestations for a range of validator indices. +#[derive(Debug)] +pub struct SubQueue { + pub(crate) attestations: Mutex>>>, +} + +impl SubQueue { + pub fn new() -> Self { + SubQueue { + attestations: Mutex::new(vec![]), + } + } + + /// Empty the queue. + pub fn take(&self) -> Vec>> { + std::mem::replace(&mut self.attestations.lock(), vec![]) + } + + pub fn len(&self) -> usize { + self.attestations.lock().len() + } +} + +impl AttestationQueue { + pub fn new(validators_per_chunk: usize) -> Self { + Self { + attestations_to_store: Mutex::new(vec![]), + subqueues: RwLock::new(vec![]), + validators_per_chunk, + } + } + + /// Add an attestation to all relevant queues, creating them if necessary.
+ pub fn queue(&self, attestation: IndexedAttestation) { + let attestation = Arc::new(attestation); + + self.attestations_to_store.lock().push(attestation.clone()); + + let subqueue_ids = attestation + .attesting_indices + .iter() + .map(|validator_index| *validator_index as usize / self.validators_per_chunk) + .collect::>(); + + if let Some(max_subqueue_id) = subqueue_ids.iter().max() { + if *max_subqueue_id >= self.subqueues.read().len() { + self.subqueues + .write() + .resize_with(max_subqueue_id + 1, SubQueue::new); + } + } + + for subqueue_id in subqueue_ids { + let subqueues_lock = self.subqueues.read(); + subqueues_lock[subqueue_id] + .attestations + .lock() + .push(attestation.clone()); + } + } + + pub fn get_attestations_to_store(&self) -> Vec>> { + std::mem::replace(&mut self.attestations_to_store.lock(), vec![]) + } + + /// Return `(num_queues, num_attestations)`. + pub fn stats(&self) -> (usize, usize) { + let subqueues = self.subqueues.read(); + let num_queues = subqueues.len(); + let num_attestations = subqueues.iter().map(SubQueue::len).sum(); + (num_queues, num_attestations) + } +} diff --git a/slasher/src/config.rs b/slasher/src/config.rs new file mode 100644 index 00000000000..d41c78c60b9 --- /dev/null +++ b/slasher/src/config.rs @@ -0,0 +1,79 @@ +use crate::Error; +use serde_derive::{Deserialize, Serialize}; +use std::path::PathBuf; +use types::{Epoch, EthSpec, IndexedAttestation}; + +pub const DEFAULT_CHUNK_SIZE: usize = 16; +pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256; +pub const DEFAULT_HISTORY_LENGTH: usize = 54_000; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub database_path: PathBuf, + pub chunk_size: usize, + pub validator_chunk_size: usize, + /// Number of epochs of history to keep. + pub history_length: usize, +} + +impl Config { + pub fn new(database_path: PathBuf) -> Self { + Self { + database_path, + chunk_size: DEFAULT_CHUNK_SIZE, + validator_chunk_size: DEFAULT_VALIDATOR_CHUNK_SIZE, + history_length: DEFAULT_HISTORY_LENGTH, + } + } + + pub fn validate(&self) -> Result<(), Error> { + if self.history_length % self.chunk_size != 0 { + Err(Error::ConfigInvalidChunkSize { + chunk_size: self.chunk_size, + history_length: self.history_length, + }) + } else { + Ok(()) + } + } + + pub fn chunk_index(&self, epoch: Epoch) -> usize { + (epoch.as_usize() % self.history_length) / self.chunk_size + } + + pub fn validator_chunk_index(&self, validator_index: u64) -> usize { + validator_index as usize / self.validator_chunk_size + } + + pub fn chunk_offset(&self, epoch: Epoch) -> usize { + epoch.as_usize() % self.chunk_size + } + + pub fn validator_offset(&self, validator_index: u64) -> usize { + validator_index as usize % self.validator_chunk_size + } + + /// Map the validator and epoch chunk indexes into a single value for use as a database key. + pub fn disk_key(&self, validator_chunk_index: usize, chunk_index: usize) -> usize { + let width = self.history_length / self.chunk_size; + validator_chunk_index * width + chunk_index + } + + /// Map the validator and epoch offsets into an index for `Chunk::data`. + pub fn cell_index(&self, validator_offset: usize, chunk_offset: usize) -> usize { + validator_offset * self.chunk_size + chunk_offset + } + + /// Iterate over the attesting indices which belong to the `validator_chunk_index` chunk. 
+ pub fn attesting_validators_for_chunk<'a, E: EthSpec>( + &'a self, + attestation: &'a IndexedAttestation, + validator_chunk_index: usize, + ) -> impl Iterator + 'a { + attestation + .attesting_indices + .iter() + .filter(move |v| self.validator_chunk_index(**v) == validator_chunk_index) + .copied() + } +} diff --git a/slasher/src/database.rs b/slasher/src/database.rs new file mode 100644 index 00000000000..3d787b71e8b --- /dev/null +++ b/slasher/src/database.rs @@ -0,0 +1,196 @@ +use crate::{Config, Error, SlashingStatus}; +use lmdb::{Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; +use ssz::{Decode, Encode}; +use std::marker::PhantomData; +use std::sync::Arc; +use tree_hash::TreeHash; +use types::{Epoch, EthSpec, Hash256, IndexedAttestation}; + +/// Map from `(validator_index, target_epoch)` to `indexed_attestation_hash`. +const ATTESTER_DB: &str = "attester"; +/// Map from `indexed_attestation_hash` to `IndexedAttestation`. +const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; +const MIN_TARGETS_DB: &str = "min_targets"; +const MAX_TARGETS_DB: &str = "max_targets"; + +/// The number of DBs for LMDB to use (equal to the number of DBs defined above). +const LMDB_MAX_DBS: u32 = 4; +/// The size of the in-memory map for LMDB (larger than the maximum size of the database). +const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB + +const ATTESTER_KEY_SIZE: usize = 16; + +#[derive(Debug)] +pub struct SlasherDB { + pub(crate) env: Environment, + pub(crate) indexed_attestation_db: Database, + pub(crate) attester_db: Database, + pub(crate) min_targets_db: Database, + pub(crate) max_targets_db: Database, + config: Arc, + _phantom: PhantomData, +} + +#[derive(Debug)] +pub struct AttesterKey { + data: [u8; ATTESTER_KEY_SIZE], +} + +impl AttesterKey { + pub fn new(validator_index: u64, target_epoch: Epoch, config: &Config) -> Self { + let mut data = [0; ATTESTER_KEY_SIZE]; + let epoch_offset = target_epoch.as_usize() % config.history_length; + data[0..8].copy_from_slice(&validator_index.to_be_bytes()); + data[8..ATTESTER_KEY_SIZE].copy_from_slice(&epoch_offset.to_be_bytes()); + AttesterKey { data } + } +} + +impl AsRef<[u8]> for AttesterKey { + fn as_ref(&self) -> &[u8] { + &self.data + } +} + +impl SlasherDB { + pub fn open(config: Arc) -> Result { + // TODO: open_with_permissions + std::fs::create_dir_all(&config.database_path)?; + let env = Environment::new() + .set_max_dbs(LMDB_MAX_DBS) + .set_map_size(LMDB_MAP_SIZE) + .open(&config.database_path)?; + let indexed_attestation_db = + env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; + let attester_db = env.create_db(Some(ATTESTER_DB), Self::db_flags())?; + let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; + let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; + Ok(Self { + env, + indexed_attestation_db, + attester_db, + min_targets_db, + max_targets_db, + config, + _phantom: PhantomData, + }) + } + + pub fn db_flags() -> DatabaseFlags { + DatabaseFlags::default() + } + + pub fn write_flags() -> WriteFlags { + WriteFlags::default() + } + + pub fn begin_rw_txn(&self) -> Result, Error> { + Ok(self.env.begin_rw_txn()?) 
+ } + + pub fn store_indexed_attestation( + &self, + txn: &mut RwTransaction<'_>, + indexed_attestation: &IndexedAttestation, + ) -> Result<(), Error> { + let indexed_attestation_hash = indexed_attestation.tree_hash_root(); + let data = indexed_attestation.as_ssz_bytes(); + + txn.put( + self.indexed_attestation_db, + &indexed_attestation_hash.as_bytes(), + &data, + Self::write_flags(), + )?; + Ok(()) + } + + pub fn get_indexed_attestation( + &self, + txn: &mut RwTransaction<'_>, + indexed_attestation_hash: Hash256, + ) -> Result, Error> { + match txn.get(self.indexed_attestation_db, &indexed_attestation_hash) { + Ok(bytes) => Ok(IndexedAttestation::from_ssz_bytes(bytes)?), + Err(lmdb::Error::NotFound) => Err(Error::MissingIndexedAttestation { + root: indexed_attestation_hash, + }), + Err(e) => Err(e.into()), + } + } + + pub fn check_and_update_attester_record( + &self, + txn: &mut RwTransaction<'_>, + validator_index: u64, + attestation: &IndexedAttestation, + indexed_attestation_hash: Hash256, + ) -> Result, Error> { + // See if there's an existing indexed attestation for this attester. + if let Some(existing_hash) = self.get_attestation_hash_for_validator( + txn, + validator_index, + attestation.data.target.epoch, + )? { + // If the existing indexed attestation is identical, then this attestation is not + // slashable and no update is required. + if existing_hash == indexed_attestation_hash { + return Ok(SlashingStatus::NotSlashable); + } + + // Otherwise, load the indexed attestation so we can check if it's slashable against + // the new one. + let existing_attestation = self.get_indexed_attestation(txn, existing_hash)?; + if attestation.is_double_vote(&existing_attestation) { + Ok(SlashingStatus::DoubleVote(Box::new(existing_attestation))) + } else { + Ok(SlashingStatus::NotSlashable) + } + } + // If no indexed attestation exists, insert one for this attester. + else { + txn.put( + self.attester_db, + &AttesterKey::new(validator_index, attestation.data.target.epoch, &self.config), + &indexed_attestation_hash, + Self::write_flags(), + )?; + Ok(SlashingStatus::NotSlashable) + } + } + + pub fn get_attestation_for_validator( + &self, + txn: &mut RwTransaction<'_>, + validator_index: u64, + target: Epoch, + ) -> Result>, Error> { + if let Some(hash) = self.get_attestation_hash_for_validator(txn, validator_index, target)? 
{ + Ok(Some(self.get_indexed_attestation(txn, hash)?)) + } else { + Ok(None) + } + } + + pub fn get_attestation_hash_for_validator( + &self, + txn: &mut RwTransaction<'_>, + validator_index: u64, + target: Epoch, + ) -> Result, Error> { + let attester_key = AttesterKey::new(validator_index, target, &self.config); + match txn.get(self.attester_db, &attester_key) { + Ok(hash_bytes) => Ok(Some(hash256_from_slice(hash_bytes)?)), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e.into()), + } + } +} + +fn hash256_from_slice(data: &[u8]) -> Result { + if data.len() == 32 { + Ok(Hash256::from_slice(data)) + } else { + Err(Error::AttesterRecordCorrupt { length: data.len() }) + } +} diff --git a/slasher/src/error.rs b/slasher/src/error.rs new file mode 100644 index 00000000000..1a9e23ebf97 --- /dev/null +++ b/slasher/src/error.rs @@ -0,0 +1,62 @@ +use std::io; +use types::{Epoch, Hash256}; + +#[derive(Debug)] +pub enum Error { + DatabaseError(lmdb::Error), + DatabaseIOError(io::Error), + SszDecodeError(ssz::DecodeError), + BincodeError(bincode::Error), + ArithError(safe_arith::ArithError), + ChunkIndexOutOfBounds(usize), + ConfigInvalidChunkSize { + chunk_size: usize, + history_length: usize, + }, + DistanceTooLarge, + DistanceCalculationOverflow, + /// Missing an attester record that we expected to exist. + MissingAttesterRecord { + validator_index: u64, + target_epoch: Epoch, + }, + AttesterRecordCorrupt { + length: usize, + }, + MissingIndexedAttestation { + root: Hash256, + }, +} + +impl From for Error { + fn from(e: lmdb::Error) -> Self { + match e { + lmdb::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), + _ => Error::DatabaseError(e), + } + } +} + +impl From for Error { + fn from(e: io::Error) -> Self { + Error::DatabaseIOError(e) + } +} + +impl From for Error { + fn from(e: ssz::DecodeError) -> Self { + Error::SszDecodeError(e) + } +} + +impl From for Error { + fn from(e: bincode::Error) -> Self { + Error::BincodeError(e) + } +} + +impl From for Error { + fn from(e: safe_arith::ArithError) -> Self { + Error::ArithError(e) + } +} diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs new file mode 100644 index 00000000000..dfecf3a593e --- /dev/null +++ b/slasher/src/lib.rs @@ -0,0 +1,45 @@ +#![deny(missing_debug_implementations)] + +mod array; +mod attestation_queue; +pub mod config; +mod database; +mod error; +mod slasher; +mod slasher_server; + +pub use crate::slasher::Slasher; +pub use attestation_queue::AttestationQueue; +pub use config::Config; +pub use database::SlasherDB; +pub use error::Error; +pub use slasher_server::SlasherServer; + +use types::{AttesterSlashing, EthSpec, IndexedAttestation}; + +#[derive(Debug, PartialEq)] +pub enum SlashingStatus { + NotSlashable, + DoubleVote(Box>), + SurroundsExisting(Box>), + SurroundedByExisting(Box>), +} + +impl SlashingStatus { + pub fn into_slashing( + self, + new_attestation: &IndexedAttestation, + ) -> Option> { + use SlashingStatus::*; + + match self { + NotSlashable => None, + DoubleVote(existing) | SurroundsExisting(existing) | SurroundedByExisting(existing) => { + Some(AttesterSlashing { + attestation_1: *existing, + attestation_2: new_attestation.clone(), + }) + } + } + } +} diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs new file mode 100644 index 00000000000..35ce44e4df4 --- /dev/null +++ b/slasher/src/slasher.rs @@ -0,0 +1,168 @@ +use crate::{array, AttestationQueue, Config, Error, SlasherDB}; +use lmdb::{RwTransaction, Transaction}; +use parking_lot::Mutex; +use slog::{debug, 
error, info, Logger}; +use std::sync::Arc; +use tree_hash::TreeHash; +use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; + +#[derive(Debug)] +pub struct Slasher { + db: SlasherDB, + pub(crate) attestation_queue: AttestationQueue, + // TODO: consider using a set + attester_slashings: Mutex>>, + // TODO: consider removing Arc + config: Arc, + pub(crate) log: Logger, +} + +impl Slasher { + pub fn open(config: Config, log: Logger) -> Result { + config.validate()?; + let config = Arc::new(config); + let db = SlasherDB::open(config.clone())?; + let attester_slashings = Mutex::new(vec![]); + let attestation_queue = AttestationQueue::new(config.validator_chunk_size); + Ok(Self { + db, + attester_slashings, + attestation_queue, + config, + log, + }) + } + + pub fn get_attester_slashings(&self) -> Vec> { + std::mem::replace(&mut self.attester_slashings.lock(), vec![]) + } + + pub fn config(&self) -> &Config { + &self.config + } + + /// Accept an attestation from the network and queue it for processing. + pub fn accept_attestation(&self, attestation: IndexedAttestation) { + self.attestation_queue.queue(attestation); + } + + /// Apply queued attestations to the on-disk database. + pub fn process_attestations(&self, current_epoch: Epoch) -> Result<(), Error> { + let mut txn = self.db.begin_rw_txn()?; + + // Insert attestations into database. + for attestation in self.attestation_queue.get_attestations_to_store() { + self.db.store_indexed_attestation(&mut txn, &attestation)?; + } + + // Dequeue attestations in batches and process them. + let subqueues_lock = self.attestation_queue.subqueues.read(); + for (subqueue_id, subqueue) in subqueues_lock.iter().enumerate() { + let batch = subqueue.take(); + self.process_batch(&mut txn, subqueue_id, batch, current_epoch); + } + txn.commit()?; + Ok(()) + } + + /// Process a batch of attestations for a range of validator indices. + fn process_batch( + &self, + txn: &mut RwTransaction<'_>, + subqueue_id: usize, + batch: Vec>>, + current_epoch: Epoch, + ) { + // First, check for double votes. + for attestation in &batch { + match self.check_double_votes(txn, subqueue_id, &attestation) { + Ok(slashings) => { + if !slashings.is_empty() { + info!( + self.log, + "Found {} new double-vote slashings!", + slashings.len() + ); + } + self.attester_slashings.lock().extend(slashings); + } + Err(e) => { + error!( + self.log, + "Error checking for double votes"; + "error" => format!("{:?}", e) + ); + } + } + } + + // Then check for surrounds using the min-max arrays. + match array::update( + &self.db, + txn, + subqueue_id, + batch, + current_epoch, + &self.config, + ) { + Ok(slashings) => { + if !slashings.is_empty() { + info!( + self.log, + "Found {} new surround slashings!", + slashings.len() + ); + } + self.attester_slashings.lock().extend(slashings); + } + Err(e) => { + error!( + self.log, + "Error processing array update"; + "error" => format!("{:?}", e), + ); + } + } + } + + /// Check for double votes from all validators on `attestation` who match the `subqueue_id`. 
+    /// Check for double votes from all validators on `attestation` who match the `subqueue_id`.
+    fn check_double_votes(
+        &self,
+        txn: &mut RwTransaction<'_>,
+        subqueue_id: usize,
+        attestation: &IndexedAttestation<E>,
+    ) -> Result<Vec<AttesterSlashing<E>>, Error> {
+        let indexed_attestation_hash = attestation.tree_hash_root();
+
+        let mut slashings = vec![];
+
+        for validator_index in self
+            .config
+            .attesting_validators_for_chunk(attestation, subqueue_id)
+        {
+            let slashing_status = self.db.check_and_update_attester_record(
+                txn,
+                validator_index,
+                &attestation,
+                indexed_attestation_hash,
+            )?;
+
+            if let Some(slashing) = slashing_status.into_slashing(attestation) {
+                debug!(
+                    self.log,
+                    "Found double-vote slashing";
+                    "validator_index" => validator_index,
+                    "epoch" => slashing.attestation_1.data.target.epoch,
+                );
+
+                // Avoid creating duplicate slashings for the same attestation.
+                // PERF: this is O(n) instead of O(1), but n should be small.
+                if !slashings.contains(&slashing) {
+                    slashings.push(slashing);
+                }
+            }
+        }
+
+        Ok(slashings)
+    }
+}
diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs
new file mode 100644
index 00000000000..6029f3f071a
--- /dev/null
+++ b/slasher/src/slasher_server.rs
@@ -0,0 +1,64 @@
+use crate::Slasher;
+use environment::TaskExecutor;
+use slog::{debug, error, info, trace};
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use tokio::stream::StreamExt;
+use tokio::time::{interval_at, Duration, Instant};
+use types::EthSpec;
+
+#[derive(Debug)]
+pub struct SlasherServer;
+
+impl SlasherServer {
+    pub fn new<E: EthSpec, C: SlotClock + 'static>(
+        slasher: Arc<Slasher<E>>,
+        slot_clock: C,
+        executor: &TaskExecutor,
+    ) {
+        info!(slasher.log, "Starting slasher to detect misbehaviour");
+        let sub_executor = executor.clone();
+        executor.spawn(
+            async move {
+                // FIXME: read slot time from config, align to some fraction of each slot
+                let slot_clock = Arc::new(slot_clock);
+                let mut interval = interval_at(Instant::now(), Duration::from_secs(12));
+                while interval.next().await.is_some() {
+                    let slot_clock = slot_clock.clone();
+                    let slasher = slasher.clone();
+                    sub_executor.spawn_blocking(
+                        move || {
+                            if let Some(current_slot) = slot_clock.now() {
+                                let t = Instant::now();
+                                let current_epoch = current_slot.epoch(E::slots_per_epoch());
+                                let (num_validator_chunks, num_attestations) =
+                                    slasher.attestation_queue.stats();
+                                if let Err(e) = slasher.process_attestations(current_epoch) {
+                                    error!(
+                                        slasher.log,
+                                        "Error during scheduled slasher processing";
+                                        "error" => format!("{:?}", e)
+                                    );
+                                }
+                                debug!(
+                                    slasher.log,
+                                    "Completed slasher update";
+                                    "time_taken" => format!("{}ms", t.elapsed().as_millis()),
+                                    "num_attestations" => num_attestations,
+                                    "num_validator_chunks" => num_validator_chunks,
+                                );
+                            } else {
+                                trace!(
+                                    slasher.log,
+                                    "Slasher has nothing to do: we are pre-genesis"
+                                );
+                            }
+                        },
+                        "slasher_server_process_attestations",
+                    );
+                }
+            },
+            "slasher_server",
+        );
+    }
+}
diff --git a/slasher/tests/slasher_tests.rs b/slasher/tests/slasher_tests.rs
new file mode 100644
index 00000000000..882dcbe8c71
--- /dev/null
+++ b/slasher/tests/slasher_tests.rs
@@ -0,0 +1,236 @@
+use slasher::{config::DEFAULT_CHUNK_SIZE, Config, Slasher};
+use slog::{o, Drain, Logger};
+use tempdir::TempDir;
+use types::{
+    AggregateSignature, AttestationData, AttesterSlashing, Checkpoint, Epoch, Hash256,
+    IndexedAttestation, MainnetEthSpec, Slot,
+};
+
+type E = MainnetEthSpec;
+
+fn indexed_att(
+    attesting_indices: impl AsRef<[u64]>,
+    source_epoch: u64,
+    target_epoch: u64,
+    target_root: u64,
+) -> IndexedAttestation<E> {
+    IndexedAttestation {
+        attesting_indices:
attesting_indices.as_ref().to_vec().into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(source_epoch), + root: Hash256::from_low_u64_be(0), + }, + target: Checkpoint { + epoch: Epoch::new(target_epoch), + root: Hash256::from_low_u64_be(target_root), + }, + }, + signature: AggregateSignature::empty(), + } +} + +fn att_slashing( + attestation_1: &IndexedAttestation, + attestation_2: &IndexedAttestation, +) -> AttesterSlashing { + AttesterSlashing { + attestation_1: attestation_1.clone(), + attestation_2: attestation_2.clone(), + } +} + +#[test] +fn double_vote_single_val() { + let v = vec![99]; + let att1 = indexed_att(&v, 0, 1, 0); + let att2 = indexed_att(&v, 0, 1, 1); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); +} + +#[test] +fn double_vote_multi_vals() { + let v = vec![0, 1, 2]; + let att1 = indexed_att(&v, 0, 1, 0); + let att2 = indexed_att(&v, 0, 1, 1); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); +} + +// A subset of validators double vote. +#[test] +fn double_vote_some_vals() { + let v1 = vec![0, 1, 2, 3, 4, 5, 6]; + let v2 = vec![0, 2, 4, 6]; + let att1 = indexed_att(&v1, 0, 1, 0); + let att2 = indexed_att(&v2, 0, 1, 1); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); +} + +// A subset of validators double vote, others vote twice for the same thing. +#[test] +fn double_vote_some_vals_repeat() { + let v1 = vec![0, 1, 2, 3, 4, 5, 6]; + let v2 = vec![0, 2, 4, 6]; + let v3 = vec![1, 3, 5]; + let att1 = indexed_att(&v1, 0, 1, 0); + let att2 = indexed_att(&v2, 0, 1, 1); + let att3 = indexed_att(&v3, 0, 1, 0); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2, att3]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); +} + +// Nobody double votes, nobody gets slashed. +#[test] +fn no_double_vote_same_target() { + let v1 = vec![0, 1, 2, 3, 4, 5, 6]; + let v2 = vec![0, 1, 2, 3, 4, 5, 7, 8]; + let att1 = indexed_att(&v1, 0, 1, 0); + let att2 = indexed_att(&v2, 0, 1, 0); + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &[], 1); + slasher_test_indiv(&attestations, &[], 1000); +} + +// Two groups votes for different things, no slashings. 
+#[test] +fn no_double_vote_distinct_vals() { + let v1 = vec![0, 1, 2, 3]; + let v2 = vec![4, 5, 6, 7]; + let att1 = indexed_att(&v1, 0, 1, 0); + let att2 = indexed_att(&v2, 0, 1, 1); + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &[], 1); + slasher_test_indiv(&attestations, &[], 1000); +} + +#[test] +fn surrounds_existing_single_val_single_chunk() { + let v = vec![0]; + let att1 = indexed_att(&v, 1, 2, 0); + let att2 = indexed_att(&v, 0, 3, 0); + let slashings = vec![att_slashing(&att1, &att2)]; + slasher_test_indiv(&[att1, att2], &slashings, 3); +} + +/* FIXME: refactor these tests +#[test] +fn surrounds_existing_multi_vals_single_chunk() { + let v = vec![0]; + let att1 = indexed_att(&v, 1, 2, 0); + let att2 = indexed_att(&v, 0, 3, 0); + let slashings = vec![att_slashing(&att1, &att2)]; + slasher_test_indiv(&[att1, att2], &slashings, 3); + let validators = vec![0, 16, 1024, 300_000, 300_001]; + let att1 = indexed_att(validators.clone(), 1, 2, 0); + let att2 = indexed_att(validators.clone(), 0, 3, 0); + + slasher.accept_attestation(att1); + slasher.process_attestations(); + slasher.accept_attestation(att2); + slasher.process_attestations(); +} + + +#[test] +fn surrounds_existing_many_chunks() { + let v = vec![0]; + let chunk_size = Config::default().chunk_size as u64; + let att1 = indexed_att(&v, 3 * chunk_size, 3 * chunk_size + 1, 0); + let att2 = indexed_att(&v, 0, 3 * chunk_size + 2, 0); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test(&attestations, &slashings, 4 * chunk_size, |_| true); +} +*/ + +#[test] +fn surrounded_by_single_val_single_chunk() { + let v = vec![0]; + let att1 = indexed_att(&v, 0, 15, 0); + let att2 = indexed_att(&v, 1, 14, 0); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 15); +} + +#[test] +fn surrounded_by_single_val_multi_chunk() { + let v = vec![0]; + let chunk_size = DEFAULT_CHUNK_SIZE as u64; + let att1 = indexed_att(&v, 0, 3 * chunk_size, 0); + let att2 = indexed_att(&v, chunk_size, chunk_size + 1, 0); + let slashings = vec![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 3 * chunk_size); + slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); +} + +/* +fn slasher_tests(attestations: &[IndexedAttestation], expected: &[AttesterSlashing]) { + // Process after every attestation. + // slasher_test(attestations, expected, |_| true); + // Process only at the end. + slasher_test(attestations, expected, |_| false); + // Process every second attestation. + // slasher_test(attestations, expected, |i| i % 2 == 0); +} +*/ + +// Process each attestation individually, and confirm that the slashings produced are as expected. 
+fn slasher_test_indiv( + attestations: &[IndexedAttestation], + expected: &[AttesterSlashing], + current_epoch: u64, +) { + slasher_test(attestations, expected, current_epoch, |_| true); +} + +// FIXME(sproul): move this somewhere else +fn logger() -> Logger { + let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); + let drain = slog_term::FullFormat::new(decorator).build(); + Logger::root(Box::new(std::sync::Mutex::new(drain)).fuse(), o!()) +} + +fn slasher_test( + attestations: &[IndexedAttestation], + expected: &[AttesterSlashing], + current_epoch: u64, + should_process_after: impl Fn(usize) -> bool, +) { + let tempdir = TempDir::new("slasher").unwrap(); + let config = Config::new(tempdir.path().into()); + let slasher = Slasher::open(config, logger()).unwrap(); + let current_epoch = Epoch::new(current_epoch); + + for (i, attestation) in attestations.iter().enumerate() { + slasher.accept_attestation(attestation.clone()); + + if should_process_after(i) { + slasher.process_attestations(current_epoch).unwrap(); + } + } + slasher.process_attestations(current_epoch).unwrap(); + + let slashings = slasher.get_attester_slashings(); + + for (i, slashing) in expected.iter().enumerate() { + assert_eq!(*slashing, slashings[i], "slashing {} should match", i); + } + + assert_eq!(expected, &slashings[..]); +} From ca78ffc1b80e709558f7fa5ad11c45072e075e32 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 27 Aug 2020 11:37:13 +1000 Subject: [PATCH 02/34] Some new tests --- Cargo.lock | 1 + slasher/Cargo.toml | 5 +++-- slasher/tests/slasher_tests.rs | 41 ++++++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c9a67d67b3..1347ecafa68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5128,6 +5128,7 @@ dependencies = [ "lmdb", "parking_lot 0.11.0", "rand 0.7.3", + "rayon", "safe_arith", "serde", "serde_derive", diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index a0e96e3ecd2..501d39d1769 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] -slot_clock = { path = "../common/slot_clock" } bincode = "1.3.1" blake2b_simd = "0.5.10" byte-slice-cast = "0.3.5" @@ -18,14 +17,16 @@ safe_arith = { path = "../consensus/safe_arith" } serde = "1.0" serde_derive = "1.0" slog = "2.5.2" +slot_clock = { path = "../common/slot_clock" } tokio = { version = "0.2.21", features = ["full"] } tree_hash = { path = "../consensus/tree_hash" } types = { path = "../consensus/types" } [dev-dependencies] criterion = "0.3" -tempdir = "0.3.7" +rayon = "1.3.0" slog-term = "2.6.0" +tempdir = "0.3.7" [[bench]] name = "blake2b" diff --git a/slasher/tests/slasher_tests.rs b/slasher/tests/slasher_tests.rs index 882dcbe8c71..bb2738169ce 100644 --- a/slasher/tests/slasher_tests.rs +++ b/slasher/tests/slasher_tests.rs @@ -1,3 +1,4 @@ +use rayon::prelude::*; use slasher::{config::DEFAULT_CHUNK_SIZE, Config, Slasher}; use slog::{o, Drain, Logger}; use tempdir::TempDir; @@ -117,6 +118,17 @@ fn no_double_vote_distinct_vals() { slasher_test_indiv(&attestations, &[], 1000); } +#[test] +fn no_double_vote_repeated() { + let v = vec![0, 1, 2, 3, 4]; + let att1 = indexed_att(&v, 0, 1, 0); + let att2 = att1.clone(); + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &[], 1); + slasher_test_batch(&attestations, &[], 1); + parallel_slasher_test(&attestations, vec![], 1); +} + #[test] fn surrounds_existing_single_val_single_chunk() { let v = vec![0]; @@ 
-199,6 +211,15 @@ fn slasher_test_indiv( slasher_test(attestations, expected, current_epoch, |_| true); } +// Process all attestations in one batch. +fn slasher_test_batch( + attestations: &[IndexedAttestation], + expected: &[AttesterSlashing], + current_epoch: u64, +) { + slasher_test(attestations, expected, current_epoch, |_| false); +} + // FIXME(sproul): move this somewhere else fn logger() -> Logger { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); @@ -234,3 +255,23 @@ fn slasher_test( assert_eq!(expected, &slashings[..]); } + +fn parallel_slasher_test( + attestations: &[IndexedAttestation], + // TODO(sproul): check slashed validators + _slashed_validators: Vec, + current_epoch: u64, +) { + let tempdir = TempDir::new("slasher").unwrap(); + let config = Config::new(tempdir.path().into()); + let slasher = Slasher::open(config, logger()).unwrap(); + let current_epoch = Epoch::new(current_epoch); + + attestations + .into_par_iter() + .try_for_each(|attestation| { + slasher.accept_attestation(attestation.clone()); + slasher.process_attestations(current_epoch) + }) + .expect("parallel processing shouldn't race"); +} From 08b5bb9bac18e6a3994d012fcae6c507306dfa97 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 27 Aug 2020 15:20:31 +1000 Subject: [PATCH 03/34] Fix concurrency bug in attestation queue --- slasher/src/attestation_queue.rs | 61 +++++++++++++++++++------------- slasher/src/slasher.rs | 9 +++-- 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 5ed9adb3328..3563380ebc6 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -1,4 +1,4 @@ -use parking_lot::{Mutex, RwLock}; +use parking_lot::Mutex; use std::collections::BTreeSet; use std::sync::Arc; use types::{EthSpec, IndexedAttestation}; @@ -8,41 +8,50 @@ use types::{EthSpec, IndexedAttestation}; /// To be added to the database in batches, for efficiency and to prevent data races. #[derive(Debug)] pub struct AttestationQueue { + snapshot: Mutex>, + validators_per_chunk: usize, +} + +#[derive(Debug)] +pub struct AttestationQueueSnapshot { /// All attestations (unique) for storage on disk. - attestations_to_store: Mutex>>>, + pub attestations_to_store: Vec>>, /// Attestations group by validator index range. - pub(crate) subqueues: RwLock>>, - pub(crate) validators_per_chunk: usize, + pub subqueues: Vec>, } /// A queue of attestations for a range of validator indices. #[derive(Debug)] pub struct SubQueue { - pub(crate) attestations: Mutex>>>, + pub attestations: Vec>>, } impl SubQueue { pub fn new() -> Self { SubQueue { - attestations: Mutex::new(vec![]), + attestations: vec![], } } /// Empty the queue. 
- pub fn take(&self) -> Vec>> { - std::mem::replace(&mut self.attestations.lock(), vec![]) + pub fn take(&mut self) -> Self { + SubQueue { + attestations: std::mem::replace(&mut self.attestations, vec![]), + } } pub fn len(&self) -> usize { - self.attestations.lock().len() + self.attestations.len() } } impl AttestationQueue { pub fn new(validators_per_chunk: usize) -> Self { Self { - attestations_to_store: Mutex::new(vec![]), - subqueues: RwLock::new(vec![]), + snapshot: Mutex::new(AttestationQueueSnapshot { + attestations_to_store: vec![], + subqueues: vec![], + }), validators_per_chunk, } } @@ -51,40 +60,44 @@ impl AttestationQueue { pub fn queue(&self, attestation: IndexedAttestation) { let attestation = Arc::new(attestation); - self.attestations_to_store.lock().push(attestation.clone()); - let subqueue_ids = attestation .attesting_indices .iter() .map(|validator_index| *validator_index as usize / self.validators_per_chunk) .collect::>(); + let mut snapshot = self.snapshot.lock(); + + snapshot.attestations_to_store.push(attestation.clone()); + if let Some(max_subqueue_id) = subqueue_ids.iter().max() { - if *max_subqueue_id >= self.subqueues.read().len() { - self.subqueues - .write() + if *max_subqueue_id >= snapshot.subqueues.len() { + snapshot + .subqueues .resize_with(max_subqueue_id + 1, SubQueue::new); } } for subqueue_id in subqueue_ids { - let subqueues_lock = self.subqueues.read(); - subqueues_lock[subqueue_id] + snapshot.subqueues[subqueue_id] .attestations - .lock() .push(attestation.clone()); } } - pub fn get_attestations_to_store(&self) -> Vec>> { - std::mem::replace(&mut self.attestations_to_store.lock(), vec![]) + pub fn get_snapshot(&self) -> AttestationQueueSnapshot { + let mut snapshot = self.snapshot.lock(); + AttestationQueueSnapshot { + attestations_to_store: std::mem::replace(&mut snapshot.attestations_to_store, vec![]), + subqueues: snapshot.subqueues.iter_mut().map(SubQueue::take).collect(), + } } /// Return `(num_queues, num_attestations)`. pub fn stats(&self) -> (usize, usize) { - let subqueues = self.subqueues.read(); - let num_queues = subqueues.len(); - let num_attestations = subqueues.iter().map(SubQueue::len).sum(); + let snapshot = self.snapshot.lock(); + let num_queues = snapshot.subqueues.len(); + let num_attestations = snapshot.subqueues.iter().map(SubQueue::len).sum(); (num_queues, num_attestations) } } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 35ce44e4df4..fd19f1a54bd 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -48,18 +48,17 @@ impl Slasher { /// Apply queued attestations to the on-disk database. pub fn process_attestations(&self, current_epoch: Epoch) -> Result<(), Error> { + let snapshot = self.attestation_queue.get_snapshot(); let mut txn = self.db.begin_rw_txn()?; // Insert attestations into database. - for attestation in self.attestation_queue.get_attestations_to_store() { + for attestation in snapshot.attestations_to_store { self.db.store_indexed_attestation(&mut txn, &attestation)?; } // Dequeue attestations in batches and process them. 
- let subqueues_lock = self.attestation_queue.subqueues.read(); - for (subqueue_id, subqueue) in subqueues_lock.iter().enumerate() { - let batch = subqueue.take(); - self.process_batch(&mut txn, subqueue_id, batch, current_epoch); + for (subqueue_id, subqueue) in snapshot.subqueues.into_iter().enumerate() { + self.process_batch(&mut txn, subqueue_id, subqueue.attestations, current_epoch); } txn.commit()?; Ok(()) From 88cd3ae03718d2521cd1003b9799f18ce5859eb4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 27 Aug 2020 16:11:11 +1000 Subject: [PATCH 04/34] Silence, clippy --- beacon_node/client/src/builder.rs | 2 +- slasher/src/array.rs | 1 + slasher/src/attestation_queue.rs | 10 ++-------- slasher/src/slasher_server.rs | 2 +- 4 files changed, 5 insertions(+), 10 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 8040cafb4c8..58e37f60405 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -326,7 +326,7 @@ where .slot_clock .clone() .ok_or_else(|| "slasher server requires a slot clock")?; - SlasherServer::new(slasher, slot_clock, &context.executor); + SlasherServer::run(slasher, slot_clock, &context.executor); Ok(self) } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index c2d1a03f966..58d3714892e 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -365,6 +365,7 @@ pub fn get_chunk_for_update<'a, E: EthSpec, T: TargetArrayChunk>( }) } +#[allow(clippy::too_many_arguments)] pub fn apply_attestation_for_validator( db: &SlasherDB, txn: &mut RwTransaction<'_>, diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 3563380ebc6..95cfec7dc43 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -21,18 +21,12 @@ pub struct AttestationQueueSnapshot { } /// A queue of attestations for a range of validator indices. -#[derive(Debug)] +#[derive(Debug, Default)] pub struct SubQueue { pub attestations: Vec>>, } impl SubQueue { - pub fn new() -> Self { - SubQueue { - attestations: vec![], - } - } - /// Empty the queue. 
pub fn take(&mut self) -> Self { SubQueue { @@ -74,7 +68,7 @@ impl AttestationQueue { if *max_subqueue_id >= snapshot.subqueues.len() { snapshot .subqueues - .resize_with(max_subqueue_id + 1, SubQueue::new); + .resize_with(max_subqueue_id + 1, SubQueue::default); } } diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 6029f3f071a..6a207138df0 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -11,7 +11,7 @@ use types::EthSpec; pub struct SlasherServer; impl SlasherServer { - pub fn new( + pub fn run( slasher: Arc>, slot_clock: C, executor: &TaskExecutor, From 0ab57fce93361cf01c00b9218c483bc770468c15 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 8 Sep 2020 14:17:29 +1000 Subject: [PATCH 05/34] Ensure attestation signatures are verified --- .../src/attestation_verification.rs | 33 +++++++++++++++---- beacon_node/beacon_chain/src/beacon_chain.rs | 9 +++-- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 4c217c7df48..d175cc3d804 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -38,9 +38,14 @@ use crate::{ use bls::verify_signature_sets; use proto_array::Block as ProtoBlock; use slot_clock::SlotClock; +use state_processing::per_block_processing::is_valid_indexed_attestation; use state_processing::{ common::get_indexed_attestation, - per_block_processing::errors::AttestationValidationError, + per_block_processing::{ + errors::{AttestationValidationError, BlockOperationError, IndexedAttestationInvalid}, + VerifySignatures, + }, + per_slot_processing, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, @@ -239,6 +244,8 @@ pub enum Error { attestation: Hash256, expected: Option, }, + /// There was an error while verifying the indexed attestation for the slasher. + SlasherVerificationError(BlockOperationError), /// There was an error whilst processing the attestation. It is not known if it is valid or invalid. /// /// ## Peer scoring @@ -316,12 +323,10 @@ fn process_slash_info( use AttestationSlashInfo::*; if let Some(slasher) = chain.slasher.as_ref() { - let (indexed_attestation, err) = match slash_info { - // TODO(sproul): check signatures - // TODO: de-duplicate by attestation hash? + let (indexed_attestation, check_signature, err) = match slash_info { SignatureNotChecked(attestation, err) => { match obtain_indexed_attestation_and_committees_per_slot(chain, &attestation) { - Ok((indexed, _)) => (indexed, err), + Ok((indexed, _)) => (indexed, true, err), Err(e) => { debug!( chain.log, @@ -333,11 +338,25 @@ fn process_slash_info( } } } - SignatureNotCheckedIndexed(indexed, err) => (indexed, err), + SignatureNotCheckedIndexed(indexed, err) => (indexed, true, err), SignatureInvalid(e) => return e, - SignatureValid(indexed, err) => (indexed, err), + SignatureValid(indexed, err) => (indexed, false, err), }; + if check_signature { + if let Err(e) = chain.with_head(|head| { + is_valid_indexed_attestation( + &head.beacon_state, + &indexed_attestation, + VerifySignatures::True, + &chain.spec, + ) + .map_err(Error::SlasherVerificationError) + }) { + return e; + } + } + // Supply to slasher. 
slasher.accept_attestation(indexed_attestation); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 109671f33d4..19003a43b9f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -526,10 +526,13 @@ impl BeaconChain { } /// Apply a function to the canonical head without cloning it. - pub fn with_head( + pub fn with_head( &self, - f: impl FnOnce(&BeaconSnapshot) -> Result, - ) -> Result { + f: impl FnOnce(&BeaconSnapshot) -> Result, + ) -> Result + where + E: From, + { let head_lock = self .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) From 934b37665791aaac43d9335ee6de77a61e6b3c03 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 8 Sep 2020 14:24:24 +1000 Subject: [PATCH 06/34] Clean up collected slashing verification --- beacon_node/beacon_chain/src/beacon_chain.rs | 33 +++++++++----------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 19003a43b9f..123f2c521e5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -37,9 +37,8 @@ use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ common::get_indexed_attestation, per_block_processing, - per_block_processing::errors::AttestationValidationError, - per_block_processing::verify_attester_slashing, per_slot_processing, BlockSignatureStrategy, - SigVerifiedOp, VerifySignatures, + per_block_processing::errors::AttestationValidationError, per_slot_processing, + BlockSignatureStrategy, SigVerifiedOp, VerifyOperation, }; use std::borrow::Cow; use std::cmp::Ordering; @@ -1096,22 +1095,20 @@ impl BeaconChain { let slashings = slasher.get_attester_slashings(); debug!(self.log, "Ingesting {} slashings", slashings.len()); for slashing in slashings { - if let Err(e) = - verify_attester_slashing(state, &slashing, VerifySignatures::True, &self.spec) - { - error!( - self.log, - "Slashing from slasher failed verification"; - "error" => format!("{:?}", e), - "slashing" => format!("{:?}", slashing), - ); - continue; - } + let verified_slashing = match slashing.clone().validate(state, &self.spec) { + Ok(verified) => verified, + Err(e) => { + error!( + self.log, + "Slashing from slasher failed verification"; + "error" => format!("{:?}", e), + "slashing" => format!("{:?}", slashing), + ); + continue; + } + }; - // FIXME(sproul): remove `trust_me` - if let Err(e) = - self.import_attester_slashing(SigVerifiedOp::trust_me(slashing.clone())) - { + if let Err(e) = self.import_attester_slashing(verified_slashing) { error!( self.log, "Slashing from slasher is invalid"; From 6beda707bec3f7d1e4c79138f4279a61479c7a69 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 8 Sep 2020 17:31:09 +1000 Subject: [PATCH 07/34] Clean up sig verification --- .../src/attestation_verification.rs | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index d175cc3d804..c22c321f842 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -244,8 +244,6 @@ pub enum Error { attestation: Hash256, expected: Option, }, - /// There was an error while verifying the indexed attestation for the slasher. 
- SlasherVerificationError(BlockOperationError), /// There was an error whilst processing the attestation. It is not known if it is valid or invalid. /// /// ## Peer scoring @@ -261,6 +259,19 @@ impl From for Error { } } +#[derive(Debug)] +enum SlasherVerificationError { + /// There was an error while verifying the indexed attestation for the slasher. + SignatureError(BlockOperationError), + BeaconChainError(BeaconChainError), +} + +impl From for SlasherVerificationError { + fn from(e: BeaconChainError) -> Self { + Self::BeaconChainError(e) + } +} + /// Wraps a `SignedAggregateAndProof` that has been verified for propagation on the gossip network. pub struct VerifiedAggregatedAttestation { signed_aggregate: SignedAggregateAndProof, @@ -351,9 +362,15 @@ fn process_slash_info( VerifySignatures::True, &chain.spec, ) - .map_err(Error::SlasherVerificationError) + .map_err(SlasherVerificationError::SignatureError) }) { - return e; + // FIXME(sproul): differentiate error from invalid sig. + debug!( + chain.log, + "Signature verification for slasher failed"; + "error" => format!("{:?}", e), + ); + return err; } } From a972fa6341e35d189ee5f8983a553bfa896628e4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 8 Sep 2020 17:31:30 +1000 Subject: [PATCH 08/34] Enable chunk compression! --- Cargo.lock | 1 + slasher/Cargo.toml | 1 + slasher/src/array.rs | 22 ++++++++++++++++------ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1347ecafa68..e78df56258a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5125,6 +5125,7 @@ dependencies = [ "criterion", "environment", "eth2_ssz", + "flate2", "lmdb", "parking_lot 0.11.0", "rand 0.7.3", diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 501d39d1769..56a30cd515d 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -10,6 +10,7 @@ blake2b_simd = "0.5.10" byte-slice-cast = "0.3.5" environment = { path = "../lighthouse/environment" } eth2_ssz = { path = "../consensus/ssz" } +flate2 = { version = "1.0.17", features = ["zlib"], default-features = false } lmdb = "0.8" parking_lot = "0.11.0" rand = "0.7" diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 58d3714892e..f91ceac5205 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,9 +1,11 @@ use crate::{Config, Error, SlasherDB, SlashingStatus}; +use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use lmdb::{RwTransaction, Transaction}; use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use std::collections::{btree_map::Entry, BTreeMap}; use std::convert::TryFrom; +use std::io::Read; use std::sync::Arc; use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; @@ -138,11 +140,15 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result, Error> { let disk_key = config.disk_key(validator_chunk_index, chunk_index); - match txn.get(Self::select_db(db), &disk_key.to_be_bytes()) { - Ok(chunk_bytes) => Ok(Some(bincode::deserialize(chunk_bytes)?)), - Err(lmdb::Error::NotFound) => Ok(None), - Err(e) => Err(e.into()), - } + let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes()) { + Ok(chunk_bytes) => chunk_bytes, + Err(lmdb::Error::NotFound) => return Ok(None), + Err(e) => return Err(e.into()), + }; + + let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes))?; + + Ok(Some(chunk)) } fn store( @@ -155,10 +161,14 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn ) -> Result<(), Error> { let 
disk_key = config.disk_key(validator_chunk_index, chunk_index); let value = bincode::serialize(self)?; + let mut encoder = ZlibEncoder::new(&value[..], flate2::Compression::default()); + let mut compressed_value = vec![]; + encoder.read_to_end(&mut compressed_value)?; + txn.put( Self::select_db(db), &disk_key.to_be_bytes(), - &value, + &compressed_value, SlasherDB::::write_flags(), )?; Ok(()) From 1ca2b267f38f4632ea39734072fbafbe098d7cb7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 9 Sep 2020 13:08:08 +1000 Subject: [PATCH 09/34] Ingest from blocks, persist more often --- beacon_node/beacon_chain/src/beacon_chain.rs | 20 +++++++++++++++++++- slasher/src/slasher_server.rs | 3 ++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 123f2c521e5..c016892a3c8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1093,7 +1093,11 @@ impl BeaconChain { fn ingest_slashings_to_op_pool(&self, state: &BeaconState) { if let Some(slasher) = self.slasher.as_ref() { let slashings = slasher.get_attester_slashings(); - debug!(self.log, "Ingesting {} slashings", slashings.len()); + + if !slashings.is_empty() { + debug!(self.log, "Ingesting {} slashings", slashings.len()); + } + for slashing in slashings { let verified_slashing = match slashing.clone().validate(state, &self.spec) { Ok(verified) => verified, @@ -1559,6 +1563,18 @@ impl BeaconChain { metrics::stop_timer(attestation_observation_timer); + // FIXME(sproul): add timer + if let Some(slasher) = self.slasher.as_ref() { + for attestation in &signed_block.message.body.attestations { + let committee = + state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; + let indexed_attestation = + get_indexed_attestation(&committee.committee, attestation) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + slasher.accept_attestation(indexed_attestation); + } + } + // If there are new validators in this block, update our pubkey cache. 
// // We perform this _before_ adding the block to fork choice because the pubkey cache is @@ -2000,6 +2016,8 @@ impl BeaconChain { || is_reorg { self.persist_head_and_fork_choice()?; + self.ingest_slashings_to_op_pool(&new_head.beacon_state); + self.persist_op_pool()?; } let update_head_timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 6a207138df0..f79b69f01f8 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -20,7 +20,8 @@ impl SlasherServer { let sub_executor = executor.clone(); executor.spawn( async move { - // FIXME: read slot time from config, align to some fraction of each slot + // FIXME(sproul): read slot time from config, align to some fraction of each slot + // FIXME(sproul): queue updates, don't run them in parallel let slot_clock = Arc::new(slot_clock); let mut interval = interval_at(Instant::now(), Duration::from_secs(12)); while interval.next().await.is_some() { From 3bdde3432e5aca41326783e29338b59fccd16321 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 9 Sep 2020 19:40:33 +1000 Subject: [PATCH 10/34] Queue slasher updates to avoid contention --- slasher/src/slasher_server.rs | 74 +++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 34 deletions(-) diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index f79b69f01f8..1f10b70ed54 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -2,6 +2,7 @@ use crate::Slasher; use environment::TaskExecutor; use slog::{debug, error, info, trace}; use slot_clock::SlotClock; +use std::sync::mpsc::{sync_channel, TrySendError}; use std::sync::Arc; use tokio::stream::StreamExt; use tokio::time::{interval_at, Duration, Instant}; @@ -17,49 +18,54 @@ impl SlasherServer { executor: &TaskExecutor, ) { info!(slasher.log, "Starting slasher to detect misbehaviour"); - let sub_executor = executor.clone(); + + // Buffer just a single message in the channel. If the receiver is still processing, we + // don't need to burden them with more work (we can wait). 
+ let (sender, receiver) = sync_channel(1); + let log = slasher.log.clone(); + executor.spawn( async move { // FIXME(sproul): read slot time from config, align to some fraction of each slot - // FIXME(sproul): queue updates, don't run them in parallel let slot_clock = Arc::new(slot_clock); let mut interval = interval_at(Instant::now(), Duration::from_secs(12)); while interval.next().await.is_some() { - let slot_clock = slot_clock.clone(); - let slasher = slasher.clone(); - sub_executor.spawn_blocking( - move || { - if let Some(current_slot) = slot_clock.now() { - let t = Instant::now(); - let current_epoch = current_slot.epoch(E::slots_per_epoch()); - let (num_validator_chunks, num_attestations) = - slasher.attestation_queue.stats(); - if let Err(e) = slasher.process_attestations(current_epoch) { - error!( - slasher.log, - "Error during scheduled slasher processing"; - "error" => format!("{:?}", e) - ); - } - debug!( - slasher.log, - "Completed slasher update"; - "time_taken" => format!("{}ms", t.elapsed().as_millis()), - "num_attestations" => num_attestations, - "num_validator_chunks" => num_validator_chunks, - ); - } else { - trace!( - slasher.log, - "Slasher has nothing to do: we are pre-genesis" - ); - } - }, - "slasher_server_process_attestations", - ); + if let Some(current_slot) = slot_clock.clone().now() { + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + if let Err(TrySendError::Disconnected(_)) = sender.try_send(current_epoch) { + break; + } + } else { + trace!(log, "Slasher has nothing to do: we are pre-genesis"); + } } }, "slasher_server", ); + + executor.spawn_blocking( + move || { + while let Ok(current_epoch) = receiver.recv() { + let t = Instant::now(); + let (num_validator_chunks, num_attestations) = + slasher.attestation_queue.stats(); + if let Err(e) = slasher.process_attestations(current_epoch) { + error!( + slasher.log, + "Error during scheduled slasher processing"; + "error" => format!("{:?}", e) + ); + } + debug!( + slasher.log, + "Completed slasher update"; + "time_taken" => format!("{}ms", t.elapsed().as_millis()), + "num_attestations" => num_attestations, + "num_validator_chunks" => num_validator_chunks, + ); + } + }, + "slasher_server_process_attestations", + ); } } From 2299e85e354de8fb6c6d0b1256015210038f4408 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 9 Sep 2020 19:41:30 +1000 Subject: [PATCH 11/34] Store attestation data hash to avoid pubkey deser This is faster, and pushes a lot of the work into tree hashing, which we might be able to optimise. 
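
[Editorial note, not part of the commit message: a condensed, illustrative sketch of the updated double-vote check, with names taken from the diff below. The standalone function `check_double_vote` is hypothetical; in the patch the logic lives in `check_and_update_attester_record`.]

    fn check_double_vote<E: EthSpec>(
        db: &SlasherDB<E>,
        txn: &mut RwTransaction<'_>,
        existing: &AttesterRecord,
        new_attestation: &IndexedAttestation<E>,
        new_record: &AttesterRecord,
    ) -> Result<SlashingStatus<E>, Error> {
        // Identical attestation data => not slashable; crucially, the stored
        // IndexedAttestation (and its signature) is never loaded or decoded here.
        if existing.attestation_data_hash == new_record.attestation_data_hash {
            return Ok(SlashingStatus::NotSlashable);
        }
        // Only now pay the cost of loading the full attestation to build a slashing.
        let existing_att = db.get_indexed_attestation(txn, existing.indexed_attestation_hash)?;
        if new_attestation.is_double_vote(&existing_att) {
            Ok(SlashingStatus::DoubleVote(Box::new(existing_att)))
        } else {
            Ok(SlashingStatus::NotSlashable)
        }
    }
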
--- Cargo.lock | 1 + slasher/Cargo.toml | 1 + slasher/src/database.rs | 55 +++++++++++++++++++++++++++-------------- slasher/src/slasher.rs | 2 ++ 4 files changed, 40 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e78df56258a..1d146a755e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5125,6 +5125,7 @@ dependencies = [ "criterion", "environment", "eth2_ssz", + "eth2_ssz_derive", "flate2", "lmdb", "parking_lot 0.11.0", diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 56a30cd515d..0f8c0fc087e 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -10,6 +10,7 @@ blake2b_simd = "0.5.10" byte-slice-cast = "0.3.5" environment = { path = "../lighthouse/environment" } eth2_ssz = { path = "../consensus/ssz" } +eth2_ssz_derive = { path = "../consensus/ssz_derive" } flate2 = { version = "1.0.17", features = ["zlib"], default-features = false } lmdb = "0.8" parking_lot = "0.11.0" diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 3d787b71e8b..cce02511148 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use tree_hash::TreeHash; use types::{Epoch, EthSpec, Hash256, IndexedAttestation}; -/// Map from `(validator_index, target_epoch)` to `indexed_attestation_hash`. +/// Map from `(validator_index, target_epoch)` to `AttesterRecord`. const ATTESTER_DB: &str = "attester"; /// Map from `indexed_attestation_hash` to `IndexedAttestation`. const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; @@ -52,6 +52,14 @@ impl AsRef<[u8]> for AttesterKey { } } +#[derive(Debug, ssz_derive::Encode, ssz_derive::Decode)] +pub struct AttesterRecord { + /// The hash of the attestation data, for checking double-voting. + attestation_data_hash: Hash256, + /// The hash of the indexed attestation, so it can be loaded. + indexed_attestation_hash: Hash256, +} + impl SlasherDB { pub fn open(config: Arc) -> Result { // TODO: open_with_permissions @@ -124,35 +132,39 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, validator_index: u64, attestation: &IndexedAttestation, + attestation_data_hash: Hash256, indexed_attestation_hash: Hash256, ) -> Result, Error> { - // See if there's an existing indexed attestation for this attester. - if let Some(existing_hash) = self.get_attestation_hash_for_validator( - txn, - validator_index, - attestation.data.target.epoch, - )? { - // If the existing indexed attestation is identical, then this attestation is not + // See if there's an existing attestation for this attester. + if let Some(existing_record) = + self.get_attester_record(txn, validator_index, attestation.data.target.epoch)? + { + // If the existing attestation data is identical, then this attestation is not // slashable and no update is required. - if existing_hash == indexed_attestation_hash { + if existing_record.attestation_data_hash == attestation_data_hash { return Ok(SlashingStatus::NotSlashable); } - // Otherwise, load the indexed attestation so we can check if it's slashable against - // the new one. - let existing_attestation = self.get_indexed_attestation(txn, existing_hash)?; + // Otherwise, load the indexed attestation so we can confirm that it's slashable. 
+ let existing_attestation = + self.get_indexed_attestation(txn, existing_record.indexed_attestation_hash)?; if attestation.is_double_vote(&existing_attestation) { Ok(SlashingStatus::DoubleVote(Box::new(existing_attestation))) } else { + // FIXME(sproul): this could be an Err Ok(SlashingStatus::NotSlashable) } } - // If no indexed attestation exists, insert one for this attester. + // If no attestation exists, insert a record for this validator. else { txn.put( self.attester_db, &AttesterKey::new(validator_index, attestation.data.target.epoch, &self.config), - &indexed_attestation_hash, + &AttesterRecord { + attestation_data_hash, + indexed_attestation_hash, + } + .as_ssz_bytes(), Self::write_flags(), )?; Ok(SlashingStatus::NotSlashable) @@ -165,28 +177,33 @@ impl SlasherDB { validator_index: u64, target: Epoch, ) -> Result>, Error> { - if let Some(hash) = self.get_attestation_hash_for_validator(txn, validator_index, target)? { - Ok(Some(self.get_indexed_attestation(txn, hash)?)) + if let Some(record) = self.get_attester_record(txn, validator_index, target)? { + Ok(Some(self.get_indexed_attestation( + txn, + record.indexed_attestation_hash, + )?)) } else { Ok(None) } } - pub fn get_attestation_hash_for_validator( + pub fn get_attester_record( &self, txn: &mut RwTransaction<'_>, validator_index: u64, target: Epoch, - ) -> Result, Error> { + ) -> Result, Error> { let attester_key = AttesterKey::new(validator_index, target, &self.config); match txn.get(self.attester_db, &attester_key) { - Ok(hash_bytes) => Ok(Some(hash256_from_slice(hash_bytes)?)), + Ok(bytes) => Ok(Some(AttesterRecord::from_ssz_bytes(bytes)?)), Err(lmdb::Error::NotFound) => Ok(None), Err(e) => Err(e.into()), } } } +// FIXME(sproul): consider using this to avoid allocations +#[allow(unused)] fn hash256_from_slice(data: &[u8]) -> Result { if data.len() == 32 { Ok(Hash256::from_slice(data)) diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index fd19f1a54bd..7b8634e4dd7 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -131,6 +131,7 @@ impl Slasher { subqueue_id: usize, attestation: &IndexedAttestation, ) -> Result>, Error> { + let attestation_data_hash = attestation.data.tree_hash_root(); let indexed_attestation_hash = attestation.tree_hash_root(); let mut slashings = vec![]; @@ -143,6 +144,7 @@ impl Slasher { txn, validator_index, &attestation, + attestation_data_hash, indexed_attestation_hash, )?; From 865dad50db25a435d6df25adca790502179c0124 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 11 Sep 2020 11:21:11 +1000 Subject: [PATCH 12/34] Reduce the amount of attestation hashing --- slasher/Cargo.toml | 1 + slasher/src/array.rs | 14 ++++++------- slasher/src/attestation_queue.rs | 15 ++++++++------ slasher/src/attester_record.rs | 35 ++++++++++++++++++++++++++++++++ slasher/src/database.rs | 24 +++++----------------- slasher/src/lib.rs | 2 ++ slasher/src/slasher.rs | 25 ++++++++++++++--------- 7 files changed, 74 insertions(+), 42 deletions(-) create mode 100644 slasher/src/attester_record.rs diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 0f8c0fc087e..c824ff755cf 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -22,6 +22,7 @@ slog = "2.5.2" slot_clock = { path = "../common/slot_clock" } tokio = { version = "0.2.21", features = ["full"] } tree_hash = { path = "../consensus/tree_hash" } +tree_hash_derive = { path = "../consensus/tree_hash_derive" } types = { path = "../consensus/types" } [dev-dependencies] diff --git a/slasher/src/array.rs b/slasher/src/array.rs 
index f91ceac5205..ad0c0010b2a 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,4 +1,4 @@ -use crate::{Config, Error, SlasherDB, SlashingStatus}; +use crate::{AttesterRecord, Config, Error, SlasherDB, SlashingStatus}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use lmdb::{RwTransaction, Transaction}; use safe_arith::SafeArith; @@ -447,7 +447,7 @@ pub fn update( db: &SlasherDB, txn: &mut RwTransaction<'_>, validator_chunk_index: usize, - batch: Vec>>, + batch: Vec, AttesterRecord)>>, current_epoch: Epoch, config: &Config, ) -> Result>, Error> { @@ -457,7 +457,7 @@ pub fn update( let mut chunk_attestations = BTreeMap::new(); for attestation in batch { chunk_attestations - .entry(config.chunk_index(attestation.data.source.epoch)) + .entry(config.chunk_index(attestation.0.data.source.epoch)) .or_insert_with(Vec::new) .push(attestation); } @@ -485,7 +485,7 @@ pub fn update_array( db: &SlasherDB, txn: &mut RwTransaction<'_>, validator_chunk_index: usize, - chunk_attestations: &BTreeMap>>>, + chunk_attestations: &BTreeMap, AttesterRecord)>>>, current_epoch: Epoch, config: &Config, ) -> Result>, Error> { @@ -496,7 +496,7 @@ pub fn update_array( for attestations in chunk_attestations.values() { for attestation in attestations { for validator_index in - config.attesting_validators_for_chunk(attestation, validator_chunk_index) + config.attesting_validators_for_chunk(&attestation.0, validator_chunk_index) { let slashing_status = apply_attestation_for_validator::( db, @@ -504,11 +504,11 @@ pub fn update_array( &mut updated_chunks, validator_chunk_index, validator_index, - attestation, + &attestation.0, current_epoch, config, )?; - if let Some(slashing) = slashing_status.into_slashing(attestation) { + if let Some(slashing) = slashing_status.into_slashing(&attestation.0) { slashings.push(slashing); } } diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 95cfec7dc43..6658aa7d906 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -1,3 +1,4 @@ +use crate::AttesterRecord; use parking_lot::Mutex; use std::collections::BTreeSet; use std::sync::Arc; @@ -15,7 +16,7 @@ pub struct AttestationQueue { #[derive(Debug)] pub struct AttestationQueueSnapshot { /// All attestations (unique) for storage on disk. - pub attestations_to_store: Vec>>, + pub attestations_to_store: Vec, AttesterRecord)>>, /// Attestations group by validator index range. pub subqueues: Vec>, } @@ -23,7 +24,7 @@ pub struct AttestationQueueSnapshot { /// A queue of attestations for a range of validator indices. #[derive(Debug, Default)] pub struct SubQueue { - pub attestations: Vec>>, + pub attestations: Vec, AttesterRecord)>>, } impl SubQueue { @@ -52,7 +53,8 @@ impl AttestationQueue { /// Add an attestation to all relevant queues, creating them if necessary. 
pub fn queue(&self, attestation: IndexedAttestation) { - let attestation = Arc::new(attestation); + // FIXME(sproul): this burdens the beacon node with extra hashing :\ + let attester_record = AttesterRecord::from(attestation.clone()); let subqueue_ids = attestation .attesting_indices @@ -60,9 +62,10 @@ impl AttestationQueue { .map(|validator_index| *validator_index as usize / self.validators_per_chunk) .collect::>(); - let mut snapshot = self.snapshot.lock(); + let arc_tuple = Arc::new((attestation, attester_record)); - snapshot.attestations_to_store.push(attestation.clone()); + let mut snapshot = self.snapshot.lock(); + snapshot.attestations_to_store.push(arc_tuple.clone()); if let Some(max_subqueue_id) = subqueue_ids.iter().max() { if *max_subqueue_id >= snapshot.subqueues.len() { @@ -75,7 +78,7 @@ impl AttestationQueue { for subqueue_id in subqueue_ids { snapshot.subqueues[subqueue_id] .attestations - .push(attestation.clone()); + .push(arc_tuple.clone()); } } diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs new file mode 100644 index 00000000000..742faa99ef2 --- /dev/null +++ b/slasher/src/attester_record.rs @@ -0,0 +1,35 @@ +use ssz_derive::{Decode, Encode}; +use tree_hash::TreeHash as _; +use tree_hash_derive::TreeHash; +use types::{AggregateSignature, EthSpec, Hash256, IndexedAttestation, VariableList}; + +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub struct AttesterRecord { + /// The hash of the attestation data, for checking double-voting. + pub attestation_data_hash: Hash256, + /// The hash of the indexed attestation, so it can be loaded. + pub indexed_attestation_hash: Hash256, +} + +#[derive(Debug, Clone, Encode, Decode, TreeHash)] +pub struct IndexedAttestationHeader { + pub attesting_indices: VariableList, + pub data_root: Hash256, + pub signature: AggregateSignature, +} + +impl From> for AttesterRecord { + fn from(indexed_attestation: IndexedAttestation) -> AttesterRecord { + let attestation_data_hash = indexed_attestation.data.tree_hash_root(); + let header = IndexedAttestationHeader:: { + attesting_indices: indexed_attestation.attesting_indices, + data_root: attestation_data_hash, + signature: indexed_attestation.signature, + }; + let indexed_attestation_hash = header.tree_hash_root(); + AttesterRecord { + attestation_data_hash, + indexed_attestation_hash, + } + } +} diff --git a/slasher/src/database.rs b/slasher/src/database.rs index cce02511148..25ef4df0264 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,9 +1,8 @@ -use crate::{Config, Error, SlashingStatus}; +use crate::{AttesterRecord, Config, Error, SlashingStatus}; use lmdb::{Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; use ssz::{Decode, Encode}; use std::marker::PhantomData; use std::sync::Arc; -use tree_hash::TreeHash; use types::{Epoch, EthSpec, Hash256, IndexedAttestation}; /// Map from `(validator_index, target_epoch)` to `AttesterRecord`. @@ -52,14 +51,6 @@ impl AsRef<[u8]> for AttesterKey { } } -#[derive(Debug, ssz_derive::Encode, ssz_derive::Decode)] -pub struct AttesterRecord { - /// The hash of the attestation data, for checking double-voting. - attestation_data_hash: Hash256, - /// The hash of the indexed attestation, so it can be loaded. 
- indexed_attestation_hash: Hash256, -} - impl SlasherDB { pub fn open(config: Arc) -> Result { // TODO: open_with_permissions @@ -99,9 +90,9 @@ impl SlasherDB { pub fn store_indexed_attestation( &self, txn: &mut RwTransaction<'_>, + indexed_attestation_hash: Hash256, indexed_attestation: &IndexedAttestation, ) -> Result<(), Error> { - let indexed_attestation_hash = indexed_attestation.tree_hash_root(); let data = indexed_attestation.as_ssz_bytes(); txn.put( @@ -132,8 +123,7 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, validator_index: u64, attestation: &IndexedAttestation, - attestation_data_hash: Hash256, - indexed_attestation_hash: Hash256, + record: AttesterRecord, ) -> Result, Error> { // See if there's an existing attestation for this attester. if let Some(existing_record) = @@ -141,7 +131,7 @@ impl SlasherDB { { // If the existing attestation data is identical, then this attestation is not // slashable and no update is required. - if existing_record.attestation_data_hash == attestation_data_hash { + if existing_record.attestation_data_hash == record.attestation_data_hash { return Ok(SlashingStatus::NotSlashable); } @@ -160,11 +150,7 @@ impl SlasherDB { txn.put( self.attester_db, &AttesterKey::new(validator_index, attestation.data.target.epoch, &self.config), - &AttesterRecord { - attestation_data_hash, - indexed_attestation_hash, - } - .as_ssz_bytes(), + &record.as_ssz_bytes(), Self::write_flags(), )?; Ok(SlashingStatus::NotSlashable) diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index dfecf3a593e..286ddb7190c 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -2,12 +2,14 @@ mod array; mod attestation_queue; +mod attester_record; pub mod config; mod database; mod error; mod slasher; mod slasher_server; +pub use crate::attester_record::AttesterRecord; pub use crate::slasher::Slasher; pub use attestation_queue::AttestationQueue; pub use config::Config; diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 7b8634e4dd7..86481f89a56 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -1,9 +1,8 @@ -use crate::{array, AttestationQueue, Config, Error, SlasherDB}; +use crate::{array, AttestationQueue, AttesterRecord, Config, Error, SlasherDB}; use lmdb::{RwTransaction, Transaction}; use parking_lot::Mutex; use slog::{debug, error, info, Logger}; use std::sync::Arc; -use tree_hash::TreeHash; use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; #[derive(Debug)] @@ -52,8 +51,17 @@ impl Slasher { let mut txn = self.db.begin_rw_txn()?; // Insert attestations into database. + debug!( + self.log, + "Storing {} attestations in slasher DB", + snapshot.attestations_to_store.len() + ); for attestation in snapshot.attestations_to_store { - self.db.store_indexed_attestation(&mut txn, &attestation)?; + self.db.store_indexed_attestation( + &mut txn, + attestation.1.indexed_attestation_hash, + &attestation.0, + )?; } // Dequeue attestations in batches and process them. @@ -69,12 +77,12 @@ impl Slasher { &self, txn: &mut RwTransaction<'_>, subqueue_id: usize, - batch: Vec>>, + batch: Vec, AttesterRecord)>>, current_epoch: Epoch, ) { // First, check for double votes. 
for attestation in &batch { - match self.check_double_votes(txn, subqueue_id, &attestation) { + match self.check_double_votes(txn, subqueue_id, &attestation.0, attestation.1) { Ok(slashings) => { if !slashings.is_empty() { info!( @@ -130,10 +138,8 @@ impl Slasher { txn: &mut RwTransaction<'_>, subqueue_id: usize, attestation: &IndexedAttestation, + attester_record: AttesterRecord, ) -> Result>, Error> { - let attestation_data_hash = attestation.data.tree_hash_root(); - let indexed_attestation_hash = attestation.tree_hash_root(); - let mut slashings = vec![]; for validator_index in self @@ -144,8 +150,7 @@ impl Slasher { txn, validator_index, &attestation, - attestation_data_hash, - indexed_attestation_hash, + attester_record, )?; if let Some(slashing) = slashing_status.into_slashing(attestation) { From 463d4c834497dbf94f4e4526ac622a27ebd6f2b8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 11 Sep 2020 12:05:15 +1000 Subject: [PATCH 13/34] Make slasher update period configurable --- beacon_node/src/cli.rs | 11 +++++++++++ beacon_node/src/config.rs | 10 +++++++++- slasher/src/config.rs | 4 ++++ slasher/src/slasher_server.rs | 3 ++- 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2941fabf55b..965661a9497 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -339,6 +339,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("DIR") .takes_value(true) + .requires("slasher") + ) + .arg( + Arg::with_name("slasher-update-period") + .long("slasher-update-period") + .help( + "Configure how often the slasher runs batch processing." + ) + .value_name("SECONDS") + .requires("slasher") + .takes_value(true) ) .arg( Arg::with_name("wss-checkpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 408a8b11b82..782ebd4da41 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -354,7 +354,15 @@ pub fn get_config( } else { client_config.data_dir.join("slasher_db") }; - client_config.slasher = Some(slasher::Config::new(slasher_dir)); + + let mut slasher_config = slasher::Config::new(slasher_dir); + + if let Some(update_period) = clap_utils::parse_optional(cli_args, "slasher-update-period")? + { + slasher_config.update_period = update_period; + } + + client_config.slasher = Some(slasher_config); } Ok(client_config) diff --git a/slasher/src/config.rs b/slasher/src/config.rs index d41c78c60b9..5f59dd46f3b 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -6,6 +6,7 @@ use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256; pub const DEFAULT_HISTORY_LENGTH: usize = 54_000; +pub const DEFAULT_UPDATE_PERIOD: u64 = 12; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -14,6 +15,8 @@ pub struct Config { pub validator_chunk_size: usize, /// Number of epochs of history to keep. pub history_length: usize, + /// Update frequency in seconds. + pub update_period: u64, } impl Config { @@ -23,6 +26,7 @@ impl Config { chunk_size: DEFAULT_CHUNK_SIZE, validator_chunk_size: DEFAULT_VALIDATOR_CHUNK_SIZE, history_length: DEFAULT_HISTORY_LENGTH, + update_period: DEFAULT_UPDATE_PERIOD, } } diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 1f10b70ed54..33dc02e742b 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -23,12 +23,13 @@ impl SlasherServer { // don't need to burden them with more work (we can wait). 
let (sender, receiver) = sync_channel(1); let log = slasher.log.clone(); + let update_period = slasher.config().update_period; executor.spawn( async move { // FIXME(sproul): read slot time from config, align to some fraction of each slot let slot_clock = Arc::new(slot_clock); - let mut interval = interval_at(Instant::now(), Duration::from_secs(12)); + let mut interval = interval_at(Instant::now(), Duration::from_secs(update_period)); while interval.next().await.is_some() { if let Some(current_slot) = slot_clock.clone().now() { let current_epoch = current_slot.epoch(E::slots_per_epoch()); From 1542737a78facd564ad2251d32aff8049fe07911 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 6 Oct 2020 10:58:48 +1100 Subject: [PATCH 14/34] Fix merge conflicts --- .../src/attestation_verification.rs | 11 +++++----- beacon_node/src/lib.rs | 20 ++++--------------- slasher/Cargo.toml | 4 ++-- slasher/src/slasher_server.rs | 2 +- 4 files changed, 12 insertions(+), 25 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index c22c321f842..fe5c28d1357 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -45,7 +45,6 @@ use state_processing::{ errors::{AttestationValidationError, BlockOperationError, IndexedAttestationInvalid}, VerifySignatures, }, - per_slot_processing, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, @@ -633,7 +632,7 @@ impl VerifiedUnaggregatedAttestation { committees_per_slot: u64, subnet_id: Option, chain: &BeaconChain, - ) -> Result { + ) -> Result<(u64, SubnetId), Error> { let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::( &indexed_attestation.data, committees_per_slot, @@ -671,7 +670,7 @@ impl VerifiedUnaggregatedAttestation { }); } - Ok(validator_index) + Ok((validator_index, expected_subnet_id)) } fn verify_late_checks( @@ -714,7 +713,7 @@ impl VerifiedUnaggregatedAttestation { pub fn verify_slashable( attestation: Attestation, - subnet_id: SubnetId, + subnet_id: Option, chain: &BeaconChain, ) -> Result> { use AttestationSlashInfo::*; @@ -731,14 +730,14 @@ impl VerifiedUnaggregatedAttestation { } }; - let validator_index = match Self::verify_middle_checks( + let (validator_index, expected_subnet_id) = match Self::verify_middle_checks( &attestation, &indexed_attestation, committees_per_slot, subnet_id, chain, ) { - Ok(idx) => idx, + Ok(t) => t, Err(e) => return Err(SignatureNotCheckedIndexed(indexed_attestation, e)), }; diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 87197eef58d..abd8ffdf044 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -137,33 +137,21 @@ impl ProductionBeaconNode { let discv5_executor = Discv5Executor(executor); client_config.network.discv5_config.executor = Some(Box::new(discv5_executor)); - builder + let builder = builder .build_beacon_chain()? .network(&client_config.network) .await? -<<<<<<< HEAD .notifier()? .http_api_config(client_config.http_api.clone()) - .http_metrics_config(client_config.http_metrics.clone()) - .build() - .map(Self) -======= - .notifier()?; - - let builder = if client_config.rest_api.enabled { - builder.http_server(&client_config, &http_eth2_config, events)? 
- } else { - builder - }; + .http_metrics_config(client_config.http_metrics.clone()); + // FIXME(sproul): chain this let builder = if client_config.slasher.is_some() { builder.slasher_server()? } else { builder }; - - Ok(Self(builder.build())) ->>>>>>> 039b06603... Experimental slasher implementation + builder.build().map(Self) } pub fn into_inner(self) -> ProductionClient { diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index c824ff755cf..5f3bc854cd0 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -8,10 +8,10 @@ edition = "2018" bincode = "1.3.1" blake2b_simd = "0.5.10" byte-slice-cast = "0.3.5" -environment = { path = "../lighthouse/environment" } +task_executor = { path = "../common/task_executor" } eth2_ssz = { path = "../consensus/ssz" } eth2_ssz_derive = { path = "../consensus/ssz_derive" } -flate2 = { version = "1.0.17", features = ["zlib"], default-features = false } +flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lmdb = "0.8" parking_lot = "0.11.0" rand = "0.7" diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 33dc02e742b..b280a81046e 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -1,9 +1,9 @@ use crate::Slasher; -use environment::TaskExecutor; use slog::{debug, error, info, trace}; use slot_clock::SlotClock; use std::sync::mpsc::{sync_channel, TrySendError}; use std::sync::Arc; +use task_executor::TaskExecutor; use tokio::stream::StreamExt; use tokio::time::{interval_at, Duration, Instant}; use types::EthSpec; From d92c7fbb9005a4c7fd14d4a95c092298a117d6bd Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 26 Oct 2020 15:58:35 +1100 Subject: [PATCH 15/34] Add proposer slashing detection --- Cargo.lock | 3 +- .../types/src/signed_beacon_block_header.rs | 2 +- slasher/src/attestation_queue.rs | 2 +- slasher/src/block_queue.rs | 28 +++++++ slasher/src/database.rs | 79 ++++++++++++++++++- slasher/src/lib.rs | 13 ++- slasher/src/slasher.rs | 66 ++++++++++++++-- slasher/src/slasher_server.rs | 6 +- 8 files changed, 182 insertions(+), 17 deletions(-) create mode 100644 slasher/src/block_queue.rs diff --git a/Cargo.lock b/Cargo.lock index 655fa4ff522..5595334a4c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5328,7 +5328,6 @@ dependencies = [ "blake2b_simd", "byte-slice-cast", "criterion", - "environment", "eth2_ssz", "eth2_ssz_derive", "flate2", @@ -5342,9 +5341,11 @@ dependencies = [ "slog", "slog-term", "slot_clock", + "task_executor", "tempdir", "tokio 0.2.22", "tree_hash", + "tree_hash_derive", "types", ] diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index 700a694f894..93248e956bd 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -6,7 +6,7 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -/// An exit voluntarily submitted a validator who wishes to withdraw. +/// A signed header of a `BeaconBlock`. /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 6658aa7d906..d8442eefbd6 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -17,7 +17,7 @@ pub struct AttestationQueue { pub struct AttestationQueueSnapshot { /// All attestations (unique) for storage on disk. 
pub attestations_to_store: Vec, AttesterRecord)>>, - /// Attestations group by validator index range. + /// Attestations grouped by validator index range. pub subqueues: Vec>, } diff --git a/slasher/src/block_queue.rs b/slasher/src/block_queue.rs new file mode 100644 index 00000000000..b80d0fb07bb --- /dev/null +++ b/slasher/src/block_queue.rs @@ -0,0 +1,28 @@ +use parking_lot::Mutex; +use types::SignedBeaconBlockHeader; + +#[derive(Debug)] +pub struct BlockQueue { + blocks: Mutex>, +} + +impl BlockQueue { + pub fn new() -> Self { + BlockQueue { + blocks: Mutex::new(vec![]), + } + } + + pub fn queue(&self, block_header: SignedBeaconBlockHeader) { + self.blocks.lock().push(block_header) + } + + pub fn dequeue(&self) -> Vec { + let mut blocks = self.blocks.lock(); + std::mem::replace(&mut *blocks, vec![]) + } + + pub fn len(&self) -> usize { + self.blocks.lock().len() + } +} diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 25ef4df0264..98f86b7b1eb 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,9 +1,11 @@ -use crate::{AttesterRecord, Config, Error, SlashingStatus}; +use crate::{AttesterRecord, Config, Error, ProposerSlashingStatus, SlashingStatus}; use lmdb::{Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; use ssz::{Decode, Encode}; use std::marker::PhantomData; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, IndexedAttestation}; +use types::{ + Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, Slot, +}; /// Map from `(validator_index, target_epoch)` to `AttesterRecord`. const ATTESTER_DB: &str = "attester"; @@ -11,13 +13,17 @@ const ATTESTER_DB: &str = "attester"; const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; const MIN_TARGETS_DB: &str = "min_targets"; const MAX_TARGETS_DB: &str = "max_targets"; +/// Map from `(validator_index, slot)` to `SignedBeaconBlockHeader`. +const PROPOSER_DB: &str = "proposer"; /// The number of DBs for LMDB to use (equal to the number of DBs defined above). -const LMDB_MAX_DBS: u32 = 4; +const LMDB_MAX_DBS: u32 = 5; /// The size of the in-memory map for LMDB (larger than the maximum size of the database). 
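// The doc comment above refers to LMDB's memory map: LMDB_MAP_SIZE (256 GiB, defined
// just below) reserves address space as an upper bound on database growth rather than
// allocating that much memory up front. A sketch of how an environment with these
// settings is opened with the `lmdb` 0.8 crate used by this patch; the exact builder
// chain inside `SlasherDB::open` is elided by the hunks shown, so treat this as an
// approximation rather than the patch's literal code:
use lmdb::Environment;
use std::path::Path;

fn open_slasher_env(path: &Path) -> Result<Environment, lmdb::Error> {
    Environment::new()
        // One slot per named database (5 at this point in the series).
        .set_max_dbs(5)
        // 256 GiB map: pages are only written to disk as data actually arrives.
        .set_map_size(256 * (1 << 30))
        .open(path)
}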
+// FIXME(sproul): make this user configurable const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB const ATTESTER_KEY_SIZE: usize = 16; +const PROPOSER_KEY_SIZE: usize = 16; #[derive(Debug)] pub struct SlasherDB { @@ -26,6 +32,7 @@ pub struct SlasherDB { pub(crate) attester_db: Database, pub(crate) min_targets_db: Database, pub(crate) max_targets_db: Database, + pub(crate) proposer_db: Database, config: Arc, _phantom: PhantomData, } @@ -51,6 +58,26 @@ impl AsRef<[u8]> for AttesterKey { } } +#[derive(Debug)] +pub struct ProposerKey { + data: [u8; PROPOSER_KEY_SIZE], +} + +impl ProposerKey { + pub fn new(validator_index: u64, slot: Slot) -> Self { + let mut data = [0; PROPOSER_KEY_SIZE]; + data[0..8].copy_from_slice(&validator_index.to_be_bytes()); + data[8..ATTESTER_KEY_SIZE].copy_from_slice(&slot.as_u64().to_be_bytes()); + ProposerKey { data } + } +} + +impl AsRef<[u8]> for ProposerKey { + fn as_ref(&self) -> &[u8] { + &self.data + } +} + impl SlasherDB { pub fn open(config: Arc) -> Result { // TODO: open_with_permissions @@ -64,12 +91,14 @@ impl SlasherDB { let attester_db = env.create_db(Some(ATTESTER_DB), Self::db_flags())?; let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; + let proposer_db = env.create_db(Some(PROPOSER_DB), Self::db_flags())?; Ok(Self { env, indexed_attestation_db, attester_db, min_targets_db, max_targets_db, + proposer_db, config, _phantom: PhantomData, }) @@ -186,6 +215,50 @@ impl SlasherDB { Err(e) => Err(e.into()), } } + + pub fn get_block_proposal( + &self, + txn: &mut RwTransaction<'_>, + proposer_index: u64, + slot: Slot, + ) -> Result, Error> { + let proposer_key = ProposerKey::new(proposer_index, slot); + match txn.get(self.proposer_db, &proposer_key) { + Ok(bytes) => Ok(Some(SignedBeaconBlockHeader::from_ssz_bytes(bytes)?)), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e.into()), + } + } + + pub fn check_or_insert_block_proposal( + &self, + txn: &mut RwTransaction<'_>, + block_header: SignedBeaconBlockHeader, + ) -> Result { + let proposer_index = block_header.message.proposer_index; + let slot = block_header.message.slot; + + if let Some(existing_block) = self.get_block_proposal(txn, proposer_index, slot)? 
{ + if existing_block == block_header { + Ok(ProposerSlashingStatus::NotSlashable) + } else { + Ok(ProposerSlashingStatus::DoubleVote(Box::new( + ProposerSlashing { + signed_header_1: existing_block, + signed_header_2: block_header, + }, + ))) + } + } else { + txn.put( + self.proposer_db, + &ProposerKey::new(proposer_index, slot), + &block_header.as_ssz_bytes(), + Self::write_flags(), + )?; + Ok(ProposerSlashingStatus::NotSlashable) + } + } } // FIXME(sproul): consider using this to avoid allocations diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 286ddb7190c..0baf6bb8641 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -3,22 +3,25 @@ mod array; mod attestation_queue; mod attester_record; +mod block_queue; pub mod config; mod database; mod error; mod slasher; mod slasher_server; -pub use crate::attester_record::AttesterRecord; pub use crate::slasher::Slasher; pub use attestation_queue::AttestationQueue; +pub use attester_record::AttesterRecord; +pub use block_queue::BlockQueue; pub use config::Config; pub use database::SlasherDB; pub use error::Error; pub use slasher_server::SlasherServer; -use types::{AttesterSlashing, EthSpec, IndexedAttestation}; +use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing}; +// FIXME(sproul): rename #[derive(Debug, PartialEq)] pub enum SlashingStatus { NotSlashable, @@ -27,6 +30,12 @@ pub enum SlashingStatus { SurroundedByExisting(Box>), } +#[derive(Debug, PartialEq)] +pub enum ProposerSlashingStatus { + NotSlashable, + DoubleVote(Box), +} + impl SlashingStatus { pub fn into_slashing( self, diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 86481f89a56..9f72534a4e8 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -1,16 +1,23 @@ -use crate::{array, AttestationQueue, AttesterRecord, Config, Error, SlasherDB}; +use crate::{ + array, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, ProposerSlashingStatus, + SlasherDB, +}; use lmdb::{RwTransaction, Transaction}; use parking_lot::Mutex; use slog::{debug, error, info, Logger}; use std::sync::Arc; -use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; +use types::{ + AttesterSlashing, Epoch, EthSpec, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, +}; #[derive(Debug)] pub struct Slasher { db: SlasherDB, pub(crate) attestation_queue: AttestationQueue, + pub(crate) block_queue: BlockQueue, // TODO: consider using a set attester_slashings: Mutex>>, + proposer_slashings: Mutex>, // TODO: consider removing Arc config: Arc, pub(crate) log: Logger, @@ -22,11 +29,15 @@ impl Slasher { let config = Arc::new(config); let db = SlasherDB::open(config.clone())?; let attester_slashings = Mutex::new(vec![]); + let proposer_slashings = Mutex::new(vec![]); let attestation_queue = AttestationQueue::new(config.validator_chunk_size); + let block_queue = BlockQueue::new(); Ok(Self { db, attester_slashings, + proposer_slashings, attestation_queue, + block_queue, config, log, }) @@ -45,10 +56,52 @@ impl Slasher { self.attestation_queue.queue(attestation); } + /// Accept a block from the network and queue it for processing. + pub fn accept_block_header(&self, block_header: SignedBeaconBlockHeader) { + self.block_queue.queue(block_header); + } + + /// Apply queued blocks and attestations to the on-disk database, and detect slashings! 
+ pub fn process_queued(&self, current_epoch: Epoch) -> Result<(), Error> { + let mut txn = self.db.begin_rw_txn()?; + self.process_blocks(&mut txn)?; + self.process_attestations(current_epoch, &mut txn)?; + txn.commit()?; + Ok(()) + } + + /// Apply queued blocks to the on-disk database. + pub fn process_blocks(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { + let blocks = self.block_queue.dequeue(); + let mut slashings = vec![]; + + for block in blocks { + if let ProposerSlashingStatus::DoubleVote(slashing) = + self.db.check_or_insert_block_proposal(txn, block)? + { + slashings.push(*slashing); + } + } + + if !slashings.is_empty() { + info!( + self.log, + "Found {} new proposer slashings!", + slashings.len(), + ); + self.proposer_slashings.lock().extend(slashings); + } + + Ok(()) + } + /// Apply queued attestations to the on-disk database. - pub fn process_attestations(&self, current_epoch: Epoch) -> Result<(), Error> { + pub fn process_attestations( + &self, + current_epoch: Epoch, + txn: &mut RwTransaction<'_>, + ) -> Result<(), Error> { let snapshot = self.attestation_queue.get_snapshot(); - let mut txn = self.db.begin_rw_txn()?; // Insert attestations into database. debug!( @@ -58,7 +111,7 @@ impl Slasher { ); for attestation in snapshot.attestations_to_store { self.db.store_indexed_attestation( - &mut txn, + txn, attestation.1.indexed_attestation_hash, &attestation.0, )?; @@ -66,9 +119,8 @@ impl Slasher { // Dequeue attestations in batches and process them. for (subqueue_id, subqueue) in snapshot.subqueues.into_iter().enumerate() { - self.process_batch(&mut txn, subqueue_id, subqueue.attestations, current_epoch); + self.process_batch(txn, subqueue_id, subqueue.attestations, current_epoch); } - txn.commit()?; Ok(()) } diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index b280a81046e..4393882c32e 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -50,7 +50,8 @@ impl SlasherServer { let t = Instant::now(); let (num_validator_chunks, num_attestations) = slasher.attestation_queue.stats(); - if let Err(e) = slasher.process_attestations(current_epoch) { + let num_blocks = slasher.block_queue.len(); + if let Err(e) = slasher.process_queued(current_epoch) { error!( slasher.log, "Error during scheduled slasher processing"; @@ -63,10 +64,11 @@ impl SlasherServer { "time_taken" => format!("{}ms", t.elapsed().as_millis()), "num_attestations" => num_attestations, "num_validator_chunks" => num_validator_chunks, + "num_blocks" => num_blocks, ); } }, - "slasher_server_process_attestations", + "slasher_server_process_queued", ); } } From 661ec832190197b78952ecad5c5d4d735c2ef1da Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 27 Oct 2020 17:36:13 +1100 Subject: [PATCH 16/34] Integrate proposer slashing into BN --- .../src/attestation_verification.rs | 12 ++ beacon_node/beacon_chain/src/beacon_chain.rs | 34 ++++- .../beacon_chain/src/block_verification.rs | 135 ++++++++++++++++-- consensus/types/src/signed_beacon_block.rs | 9 +- .../types/src/signed_beacon_block_header.rs | 28 +++- slasher/src/slasher.rs | 4 + 6 files changed, 199 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index fe114e40160..bd5725131df 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -396,6 +396,12 @@ impl VerifiedAggregatedAttestation { chain: &BeaconChain, 
) -> Result { Self::verify_slashable(signed_aggregate, chain) + .map(|verified_aggregate| { + if let Some(slasher) = chain.slasher.as_ref() { + slasher.accept_attestation(verified_aggregate.indexed_attestation.clone()); + } + verified_aggregate + }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } @@ -709,6 +715,12 @@ impl VerifiedUnaggregatedAttestation { chain: &BeaconChain, ) -> Result { Self::verify_slashable(attestation, subnet_id, chain) + .map(|verified_unaggregated| { + if let Some(slasher) = chain.slasher.as_ref() { + slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone()); + } + verified_unaggregated + }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 29af7ade6ef..b3aaf488ac6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1087,19 +1087,25 @@ impl BeaconChain { fn ingest_slashings_to_op_pool(&self, state: &BeaconState) { if let Some(slasher) = self.slasher.as_ref() { - let slashings = slasher.get_attester_slashings(); + let attester_slashings = slasher.get_attester_slashings(); + let proposer_slashings = slasher.get_proposer_slashings(); - if !slashings.is_empty() { - debug!(self.log, "Ingesting {} slashings", slashings.len()); + if !attester_slashings.is_empty() || !proposer_slashings.is_empty() { + debug!( + self.log, + "Ingesting slashings"; + "num_attester_slashings" => attester_slashings.len(), + "num_proposer_slashings" => proposer_slashings.len(), + ); } - for slashing in slashings { + for slashing in attester_slashings { let verified_slashing = match slashing.clone().validate(state, &self.spec) { Ok(verified) => verified, Err(e) => { error!( self.log, - "Slashing from slasher failed verification"; + "Attester slashing from slasher failed verification"; "error" => format!("{:?}", e), "slashing" => format!("{:?}", slashing), ); @@ -1110,12 +1116,28 @@ impl BeaconChain { if let Err(e) = self.import_attester_slashing(verified_slashing) { error!( self.log, - "Slashing from slasher is invalid"; + "Attester slashing from slasher is invalid"; "error" => format!("{:?}", e), "slashing" => format!("{:?}", slashing), ); } } + + for slashing in proposer_slashings { + let verified_slashing = match slashing.clone().validate(state, &self.spec) { + Ok(verified) => verified, + Err(e) => { + error!( + self.log, + "Proposer slashing from slasher failed verification"; + "error" => format!("{:?}", e), + "slashing" => format!("{:?}", slashing), + ); + continue; + } + }; + self.import_proposer_slashing(verified_slashing); + } } } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index aaa0425dd16..b2f5f3bc857 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -67,7 +67,7 @@ use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp use tree_hash::TreeHash; use types::{ BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, EthSpec, Hash256, - PublicKey, RelativeEpoch, SignedBeaconBlock, Slot, + PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. 
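// The next hunk adds `BlockSlashInfo`, the block-proposal analogue of
// `AttestationSlashInfo`: when verification fails, the error travels together with the
// signed header so the slasher can still inspect the proposal. A condensed,
// self-contained model of the three cases and of how they are drained; the generic
// names here are illustrative, the real consumption happens in
// `IntoFullyVerifiedBlock::into_fully_verified_block` and `process_slash_info`:
pub enum SlashInfo<H, E> {
    /// Verification failed before the signature was ever checked.
    SignatureNotChecked(H, E),
    /// The signature itself is bad; the object can never be slashable.
    SignatureInvalid(E),
    /// The signature is good but some later check failed.
    SignatureValid(H, E),
}

pub fn feed_slasher_and_return_error<H, E>(
    info: SlashInfo<H, E>,
    verify_signature: impl Fn(&H) -> bool,
    mut accept: impl FnMut(H),
) -> E {
    match info {
        SlashInfo::SignatureNotChecked(header, err) => {
            // Only hand over material whose signature checks out.
            if verify_signature(&header) {
                accept(header);
            }
            err
        }
        SlashInfo::SignatureInvalid(err) => err,
        SlashInfo::SignatureValid(header, err) => {
            accept(header);
            err
        }
    }
}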
@@ -268,6 +268,26 @@ impl From for BlockError { } } +pub enum BlockSlashInfo { + /// The block is invalid, but its signature wasn't checked. + SignatureNotChecked(SignedBeaconBlockHeader, TErr), + /// The block's signature is invalid, so it will never be slashable. + SignatureInvalid(TErr), + /// The signature is valid but the attestation is invalid in some other way. + SignatureValid(SignedBeaconBlockHeader, TErr), +} + +impl BlockSlashInfo> { + pub fn from_early_error(header: SignedBeaconBlockHeader, e: BlockError) -> Self { + match e { + BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), + // `InvalidSignature` could indicate any signature in the block, so we want + // to recheck the proposer signature alone. + BlockError::InvalidSignature | _ => BlockSlashInfo::SignatureNotChecked(header, e), + } + } +} + /// Verify all signatures (except deposit signatures) on all blocks in the `chain_segment`. If all /// signatures are valid, the `chain_segment` is mapped to a `Vec` that can /// later be transformed into a `FullyVerifiedBlock` without re-checking the signatures. If any @@ -366,14 +386,81 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { pub confirmation_db_batch: Vec>, } +// FIXME(sproul): put this on the beacon chain? +fn verify_header_signature( + chain: &BeaconChain, + header: &SignedBeaconBlockHeader, +) -> Result<(), BlockError> { + let proposer_pubkey = get_validator_pubkey_cache(chain)? + .get(header.message.proposer_index as usize) + .cloned() + .ok_or_else(|| BlockError::UnknownValidator(header.message.proposer_index))?; + let (fork, genesis_validators_root) = chain + .with_head(|head| { + Ok(( + head.beacon_state.fork, + head.beacon_state.genesis_validators_root, + )) + }) + .map_err(|e: BlockError| e)?; + + if header.verify_signature::( + &proposer_pubkey, + &fork, + genesis_validators_root, + &chain.spec, + ) { + Ok(()) + } else { + Err(BlockError::ProposalSignatureInvalid) + } +} + /// Implemented on types that can be converted into a `FullyVerifiedBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. -pub trait IntoFullyVerifiedBlock { +pub trait IntoFullyVerifiedBlock: Sized { fn into_fully_verified_block( self, chain: &BeaconChain, - ) -> Result, BlockError>; + ) -> Result, BlockError> { + self.into_fully_verified_block_slashable(chain) + .map(|fully_verified| { + if let Some(slasher) = chain.slasher.as_ref() { + slasher.accept_block_header(fully_verified.block.signed_block_header()); + } + fully_verified + }) + .map_err(|slash_info| { + if let Some(slasher) = chain.slasher.as_ref() { + let (verified_header, error) = match slash_info { + BlockSlashInfo::SignatureNotChecked(header, e) => { + if verify_header_signature(chain, &header).is_ok() { + (header, e) + } else { + return e; + } + } + BlockSlashInfo::SignatureInvalid(e) => return e, + BlockSlashInfo::SignatureValid(header, e) => (header, e), + }; + + slasher.accept_block_header(verified_header); + error + } else { + match slash_info { + BlockSlashInfo::SignatureNotChecked(_, e) + | BlockSlashInfo::SignatureInvalid(e) + | BlockSlashInfo::SignatureValid(_, e) => e, + } + } + }) + } + + fn into_fully_verified_block_slashable( + self, + chain: &BeaconChain, + ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; } @@ -504,12 +591,13 @@ impl GossipVerifiedBlock { impl IntoFullyVerifiedBlock for GossipVerifiedBlock { /// Completes verification of the wrapped `block`. 
- fn into_fully_verified_block( + fn into_fully_verified_block_slashable( self, chain: &BeaconChain, - ) -> Result, BlockError> { - let fully_verified = SignatureVerifiedBlock::from_gossip_verified_block(self, chain)?; - fully_verified.into_fully_verified_block(chain) + ) -> Result, BlockSlashInfo>> { + let fully_verified = + SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; + fully_verified.into_fully_verified_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -556,6 +644,14 @@ impl SignatureVerifiedBlock { } } + pub fn check_slashable( + block: SignedBeaconBlock, + chain: &BeaconChain, + ) -> Result>> { + let header = block.signed_block_header(); + Self::new(block, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e)) + } + /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify /// the proposer signature. pub fn from_gossip_verified_block( @@ -587,18 +683,29 @@ impl SignatureVerifiedBlock { Err(BlockError::InvalidSignature) } } + + pub fn from_gossip_verified_block_check_slashable( + from: GossipVerifiedBlock, + chain: &BeaconChain, + ) -> Result>> { + let header = from.block.signed_block_header(); + Self::from_gossip_verified_block(from, chain) + .map_err(|e| BlockSlashInfo::from_early_error(header, e)) + } } impl IntoFullyVerifiedBlock for SignatureVerifiedBlock { /// Completes verification of the wrapped `block`. - fn into_fully_verified_block( + fn into_fully_verified_block_slashable( self, chain: &BeaconChain, - ) -> Result, BlockError> { + ) -> Result, BlockSlashInfo>> { + let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { - load_parent(self.block, chain)? + load_parent(self.block, chain) + .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? }; FullyVerifiedBlock::from_signature_verified_components( @@ -607,6 +714,7 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock &SignedBeaconBlock { @@ -617,11 +725,12 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignedBeaconBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoFullyVerifiedBlock` to complete verification. - fn into_fully_verified_block( + fn into_fully_verified_block_slashable( self, chain: &BeaconChain, - ) -> Result, BlockError> { - SignatureVerifiedBlock::new(self, chain)?.into_fully_verified_block(chain) + ) -> Result, BlockSlashInfo>> { + SignatureVerifiedBlock::check_slashable(self, chain)? + .into_fully_verified_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 9ab34254210..c56dad5fdb2 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,6 +1,6 @@ use crate::{ test_utils::TestRandom, BeaconBlock, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, - SignedRoot, SigningData, Slot, + SignedBeaconBlockHeader, SignedRoot, SigningData, Slot, }; use bls::Signature; use serde_derive::{Deserialize, Serialize}; @@ -81,6 +81,13 @@ impl SignedBeaconBlock { self.signature.verify(pubkey, message) } + pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { + SignedBeaconBlockHeader { + message: self.message.block_header(), + signature: self.signature.clone(), + } + } + /// Convenience accessor for the block's slot. 
pub fn slot(&self) -> Slot { self.message.slot diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index 93248e956bd..0a6411bae35 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -1,6 +1,7 @@ -use crate::{test_utils::TestRandom, BeaconBlockHeader}; -use bls::Signature; - +use crate::{ + test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, + PublicKey, Signature, SignedRoot, +}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -16,6 +17,27 @@ pub struct SignedBeaconBlockHeader { pub signature: Signature, } +impl SignedBeaconBlockHeader { + pub fn verify_signature( + &self, + pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + let domain = spec.get_domain( + self.message.slot.epoch(E::slots_per_epoch()), + Domain::BeaconProposer, + fork, + genesis_validators_root, + ); + + let message = self.message.signing_root(domain); + + self.signature.verify(pubkey, message) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 9f72534a4e8..2df5736daab 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -47,6 +47,10 @@ impl Slasher { std::mem::replace(&mut self.attester_slashings.lock(), vec![]) } + pub fn get_proposer_slashings(&self) -> Vec { + std::mem::replace(&mut self.proposer_slashings.lock(), vec![]) + } + pub fn config(&self) -> &Config { &self.config } From b2693a58b75dfc2ed5088da2de22f800798335d9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 27 Oct 2020 17:51:17 +1100 Subject: [PATCH 17/34] Remove dodgy shim --- consensus/state_processing/src/verify_operation.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 499802241fa..6cc66aa814b 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -17,10 +17,6 @@ use types::{ pub struct SigVerifiedOp(T); impl SigVerifiedOp { - pub fn trust_me(t: T) -> Self { - SigVerifiedOp(t) - } - pub fn into_inner(self) -> T { self.0 } From de41d8cefe9cea50719ecbc9e18f91bb09289be3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 28 Oct 2020 17:56:18 +1100 Subject: [PATCH 18/34] Add pruning and epoch tracking --- Cargo.lock | 4 +- slasher/Cargo.toml | 4 +- slasher/src/array.rs | 103 +++++++- slasher/src/database.rs | 249 +++++++++++++++--- slasher/src/error.rs | 8 + slasher/src/lib.rs | 2 + slasher/src/slasher.rs | 13 +- slasher/src/test_utils.rs | 54 ++++ slasher/src/utils.rs | 16 ++ ...slasher_tests.rs => attester_slashings.rs} | 65 +---- slasher/tests/proposer_slashings.rs | 68 +++++ slasher/tests/wrap_around.rs | 47 ++++ 12 files changed, 528 insertions(+), 105 deletions(-) create mode 100644 slasher/src/test_utils.rs create mode 100644 slasher/src/utils.rs rename slasher/tests/{slasher_tests.rs => attester_slashings.rs} (80%) create mode 100644 slasher/tests/proposer_slashings.rs create mode 100644 slasher/tests/wrap_around.rs diff --git a/Cargo.lock b/Cargo.lock index 5595334a4c4..4223f68f2ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5327,11 +5327,13 @@ dependencies = [ "bincode", "blake2b_simd", "byte-slice-cast", + "byteorder", "criterion", "eth2_ssz", "eth2_ssz_derive", "flate2", "lmdb", + 
"lmdb-sys", "parking_lot 0.11.0", "rand 0.7.3", "rayon", @@ -5339,7 +5341,7 @@ dependencies = [ "serde", "serde_derive", "slog", - "slog-term", + "sloggers", "slot_clock", "task_executor", "tempdir", diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 5f3bc854cd0..cd15d210096 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -8,17 +8,20 @@ edition = "2018" bincode = "1.3.1" blake2b_simd = "0.5.10" byte-slice-cast = "0.3.5" +byteorder = "1.3.4" task_executor = { path = "../common/task_executor" } eth2_ssz = { path = "../consensus/ssz" } eth2_ssz_derive = { path = "../consensus/ssz_derive" } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lmdb = "0.8" +lmdb-sys = "0.8" parking_lot = "0.11.0" rand = "0.7" safe_arith = { path = "../consensus/safe_arith" } serde = "1.0" serde_derive = "1.0" slog = "2.5.2" +sloggers = "*" slot_clock = { path = "../common/slot_clock" } tokio = { version = "0.2.21", features = ["full"] } tree_hash = { path = "../consensus/tree_hash" } @@ -28,7 +31,6 @@ types = { path = "../consensus/types" } [dev-dependencies] criterion = "0.3" rayon = "1.3.0" -slog-term = "2.6.0" tempdir = "0.3.7" [[bench]] diff --git a/slasher/src/array.rs b/slasher/src/array.rs index ad0c0010b2a..062a7a221b0 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -56,6 +56,17 @@ impl Chunk { epoch: Epoch, target_epoch: Epoch, config: &Config, + ) -> Result<(), Error> { + let distance = Self::epoch_distance(target_epoch, epoch)?; + self.set_raw_distance(validator_index, epoch, distance, config) + } + + pub fn set_raw_distance( + &mut self, + validator_index: u64, + epoch: Epoch, + target_distance: u16, + config: &Config, ) -> Result<(), Error> { let validator_offset = config.validator_offset(validator_index); let chunk_offset = config.chunk_offset(epoch); @@ -65,8 +76,7 @@ impl Chunk { .data .get_mut(cell_index) .ok_or_else(|| Error::ChunkIndexOutOfBounds(cell_index))?; - - *cell = Self::epoch_distance(target_epoch, epoch)?; + *cell = target_distance; Ok(()) } @@ -103,6 +113,10 @@ pub struct MaxTargetChunk { pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwned { fn empty(config: &Config) -> Self; + fn chunk(&mut self) -> &mut Chunk; + + fn neutral_element() -> u16; + fn check_slashable( &self, db: &SlasherDB, @@ -179,11 +193,22 @@ impl TargetArrayChunk for MinTargetChunk { fn empty(config: &Config) -> Self { MinTargetChunk { chunk: Chunk { - data: vec![MAX_DISTANCE; config.chunk_size * config.validator_chunk_size], + data: vec![ + Self::neutral_element(); + config.chunk_size * config.validator_chunk_size + ], }, } } + fn neutral_element() -> u16 { + MAX_DISTANCE + } + + fn chunk(&mut self) -> &mut Chunk { + &mut self.chunk + } + fn check_slashable( &self, db: &SlasherDB, @@ -270,11 +295,22 @@ impl TargetArrayChunk for MaxTargetChunk { fn empty(config: &Config) -> Self { MaxTargetChunk { chunk: Chunk { - data: vec![0; config.chunk_size * config.validator_chunk_size], + data: vec![ + Self::neutral_element(); + config.chunk_size * config.validator_chunk_size + ], }, } } + fn neutral_element() -> u16 { + 0 + } + + fn chunk(&mut self) -> &mut Chunk { + &mut self.chunk + } + fn check_slashable( &self, db: &SlasherDB, @@ -317,13 +353,11 @@ impl TargetArrayChunk for MaxTargetChunk { // We can stop. return Ok(false); } - if epoch == current_epoch { - return Ok(false); - } epoch += 1; } - // Continue to the next chunk. 
- Ok(true) + // If the epoch to update now lies beyond the current chunk and is less than + // or equal to the current epoch, then continue to the next chunk to update it. + Ok(epoch <= current_epoch) } fn first_start_epoch(source_epoch: Epoch, current_epoch: Epoch) -> Option { @@ -481,6 +515,48 @@ pub fn update( Ok(slashings) } +pub fn epoch_update_for_validator( + db: &SlasherDB, + txn: &mut RwTransaction<'_>, + updated_chunks: &mut BTreeMap, + validator_chunk_index: usize, + validator_index: u64, + current_epoch: Epoch, + config: &Config, +) -> Result<(), Error> { + let previous_current_epoch = if let Some(epoch) = db.get_stored_current_epoch(txn)? { + epoch + } else { + return Ok(()); + }; + + let mut chunk_index = config.chunk_index(previous_current_epoch); + let mut epoch = previous_current_epoch; + + while epoch <= current_epoch { + let current_chunk = get_chunk_for_update( + db, + txn, + updated_chunks, + validator_chunk_index, + chunk_index, + config, + )?; + while config.chunk_index(epoch) == chunk_index && epoch <= current_epoch { + current_chunk.chunk().set_raw_distance( + validator_index, + epoch, + T::neutral_element(), + config, + )?; + epoch += 1; + } + chunk_index += 1; + } + + Ok(()) +} + pub fn update_array( db: &SlasherDB, txn: &mut RwTransaction<'_>, @@ -498,6 +574,15 @@ pub fn update_array( for validator_index in config.attesting_validators_for_chunk(&attestation.0, validator_chunk_index) { + epoch_update_for_validator( + db, + txn, + &mut updated_chunks, + validator_chunk_index, + validator_index, + current_epoch, + config, + )?; let slashing_status = apply_attestation_for_validator::( db, txn, diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 98f86b7b1eb..14731df759f 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,23 +1,29 @@ -use crate::{AttesterRecord, Config, Error, ProposerSlashingStatus, SlashingStatus}; -use lmdb::{Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; +use crate::{ + utils::TxnOptional, AttesterRecord, Config, Error, ProposerSlashingStatus, SlashingStatus, +}; +use byteorder::{BigEndian, ByteOrder}; +use lmdb::{Cursor, Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; use ssz::{Decode, Encode}; +use std::collections::HashSet; use std::marker::PhantomData; use std::sync::Arc; use types::{ Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, Slot, }; -/// Map from `(validator_index, target_epoch)` to `AttesterRecord`. -const ATTESTER_DB: &str = "attester"; +/// Map from `(target_epoch, validator_index)` to `AttesterRecord`. +const ATTESTER_DB: &str = "attesters"; /// Map from `indexed_attestation_hash` to `IndexedAttestation`. const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; const MIN_TARGETS_DB: &str = "min_targets"; const MAX_TARGETS_DB: &str = "max_targets"; -/// Map from `(validator_index, slot)` to `SignedBeaconBlockHeader`. -const PROPOSER_DB: &str = "proposer"; +/// Map from `(slot, validator_index)` to `SignedBeaconBlockHeader`. +const PROPOSER_DB: &str = "proposers"; +/// Metadata about the slashing database itself. +const METADATA_DB: &str = "metadata"; /// The number of DBs for LMDB to use (equal to the number of DBs defined above). -const LMDB_MAX_DBS: u32 = 5; +const LMDB_MAX_DBS: u32 = 6; /// The size of the in-memory map for LMDB (larger than the maximum size of the database). 
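// Worked example of the per-validator catch-up that `epoch_update_for_validator` (added
// above) performs before new attestations are applied: every epoch between the last
// stored current epoch and the new one, inclusive, has its cell reset to the chunk's
// neutral element, crossing chunk boundaries as needed. The helper below uses a
// simplified chunk index (plain division, ignoring the history-length wrap-around of the
// real `Config::chunk_index`) purely to show which chunks get touched:
fn simplified_chunk_index(epoch: u64, chunk_size: u64) -> u64 {
    epoch / chunk_size
}

fn main() {
    let chunk_size = 4;
    let previous_current_epoch = 10u64; // value read back via `get_stored_current_epoch`
    let current_epoch = 13u64;

    let chunks_touched: Vec<u64> = (previous_current_epoch..=current_epoch)
        .map(|epoch| simplified_chunk_index(epoch, chunk_size))
        .collect();

    // Epochs 10 and 11 live in chunk 2, epochs 12 and 13 in chunk 3, so the update
    // walks two chunks, blanking four cells in total.
    assert_eq!(chunks_touched, vec![2, 2, 3, 3]);
}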
// FIXME(sproul): make this user configurable const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB @@ -25,31 +31,47 @@ const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB const ATTESTER_KEY_SIZE: usize = 16; const PROPOSER_KEY_SIZE: usize = 16; +const METADATA_CURRENT_EPOCH_KEY: &'static [u8] = &[0]; + #[derive(Debug)] pub struct SlasherDB { pub(crate) env: Environment, pub(crate) indexed_attestation_db: Database, - pub(crate) attester_db: Database, + pub(crate) attesters_db: Database, pub(crate) min_targets_db: Database, pub(crate) max_targets_db: Database, - pub(crate) proposer_db: Database, + pub(crate) proposers_db: Database, + pub(crate) metadata_db: Database, config: Arc, _phantom: PhantomData, } +/// Database key for the `attesters` database. +/// +/// Stored as big-endian `(target_epoch, validator_index)` to enable efficient iteration +/// while pruning. #[derive(Debug)] pub struct AttesterKey { data: [u8; ATTESTER_KEY_SIZE], } impl AttesterKey { - pub fn new(validator_index: u64, target_epoch: Epoch, config: &Config) -> Self { + pub fn new(validator_index: u64, target_epoch: Epoch) -> Self { let mut data = [0; ATTESTER_KEY_SIZE]; - let epoch_offset = target_epoch.as_usize() % config.history_length; - data[0..8].copy_from_slice(&validator_index.to_be_bytes()); - data[8..ATTESTER_KEY_SIZE].copy_from_slice(&epoch_offset.to_be_bytes()); + data[0..8].copy_from_slice(&target_epoch.as_u64().to_be_bytes()); + data[8..ATTESTER_KEY_SIZE].copy_from_slice(&validator_index.to_be_bytes()); AttesterKey { data } } + + pub fn parse(data: &[u8]) -> Result<(Epoch, u64), Error> { + if data.len() == ATTESTER_KEY_SIZE { + let target_epoch = Epoch::new(BigEndian::read_u64(&data[..8])); + let validator_index = BigEndian::read_u64(&data[8..]); + Ok((target_epoch, validator_index)) + } else { + Err(Error::AttesterKeyCorrupt { length: data.len() }) + } + } } impl AsRef<[u8]> for AttesterKey { @@ -58,6 +80,10 @@ impl AsRef<[u8]> for AttesterKey { } } +/// Database key for the `proposers` database. +/// +/// Stored as big-endian `(slot, validator_index)` to enable efficient iteration +/// while pruning. 
#[derive(Debug)] pub struct ProposerKey { data: [u8; PROPOSER_KEY_SIZE], @@ -66,10 +92,20 @@ pub struct ProposerKey { impl ProposerKey { pub fn new(validator_index: u64, slot: Slot) -> Self { let mut data = [0; PROPOSER_KEY_SIZE]; - data[0..8].copy_from_slice(&validator_index.to_be_bytes()); - data[8..ATTESTER_KEY_SIZE].copy_from_slice(&slot.as_u64().to_be_bytes()); + data[0..8].copy_from_slice(&slot.as_u64().to_be_bytes()); + data[8..PROPOSER_KEY_SIZE].copy_from_slice(&validator_index.to_be_bytes()); ProposerKey { data } } + + pub fn parse(data: &[u8]) -> Result<(Slot, u64), Error> { + if data.len() == PROPOSER_KEY_SIZE { + let slot = Slot::new(BigEndian::read_u64(&data[..8])); + let validator_index = BigEndian::read_u64(&data[8..]); + Ok((slot, validator_index)) + } else { + Err(Error::ProposerKeyCorrupt { length: data.len() }) + } + } } impl AsRef<[u8]> for ProposerKey { @@ -88,17 +124,19 @@ impl SlasherDB { .open(&config.database_path)?; let indexed_attestation_db = env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; - let attester_db = env.create_db(Some(ATTESTER_DB), Self::db_flags())?; + let attesters_db = env.create_db(Some(ATTESTER_DB), Self::db_flags())?; let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; - let proposer_db = env.create_db(Some(PROPOSER_DB), Self::db_flags())?; + let proposers_db = env.create_db(Some(PROPOSER_DB), Self::db_flags())?; + let metadata_db = env.create_db(Some(METADATA_DB), Self::db_flags())?; Ok(Self { env, indexed_attestation_db, - attester_db, + attesters_db, min_targets_db, max_targets_db, - proposer_db, + proposers_db, + metadata_db, config, _phantom: PhantomData, }) @@ -116,6 +154,32 @@ impl SlasherDB { Ok(self.env.begin_rw_txn()?) } + // FIXME(sproul): rename + pub fn get_stored_current_epoch( + &self, + txn: &mut RwTransaction<'_>, + ) -> Result, Error> { + Ok(txn + .get(self.metadata_db, &METADATA_CURRENT_EPOCH_KEY) + .optional()? + .map(Epoch::from_ssz_bytes) + .transpose()?) + } + + pub fn update_current_epoch( + &self, + current_epoch: Epoch, + txn: &mut RwTransaction<'_>, + ) -> Result<(), Error> { + txn.put( + self.metadata_db, + &METADATA_CURRENT_EPOCH_KEY, + ¤t_epoch.as_ssz_bytes(), + Self::write_flags(), + )?; + Ok(()) + } + pub fn store_indexed_attestation( &self, txn: &mut RwTransaction<'_>, @@ -138,13 +202,13 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, indexed_attestation_hash: Hash256, ) -> Result, Error> { - match txn.get(self.indexed_attestation_db, &indexed_attestation_hash) { - Ok(bytes) => Ok(IndexedAttestation::from_ssz_bytes(bytes)?), - Err(lmdb::Error::NotFound) => Err(Error::MissingIndexedAttestation { + let bytes = txn + .get(self.indexed_attestation_db, &indexed_attestation_hash) + .optional()? + .ok_or_else(|| Error::MissingIndexedAttestation { root: indexed_attestation_hash, - }), - Err(e) => Err(e.into()), - } + })?; + Ok(IndexedAttestation::from_ssz_bytes(bytes)?) } pub fn check_and_update_attester_record( @@ -177,8 +241,8 @@ impl SlasherDB { // If no attestation exists, insert a record for this validator. 
else { txn.put( - self.attester_db, - &AttesterKey::new(validator_index, attestation.data.target.epoch, &self.config), + self.attesters_db, + &AttesterKey::new(validator_index, attestation.data.target.epoch), &record.as_ssz_bytes(), Self::write_flags(), )?; @@ -208,12 +272,12 @@ impl SlasherDB { validator_index: u64, target: Epoch, ) -> Result, Error> { - let attester_key = AttesterKey::new(validator_index, target, &self.config); - match txn.get(self.attester_db, &attester_key) { - Ok(bytes) => Ok(Some(AttesterRecord::from_ssz_bytes(bytes)?)), - Err(lmdb::Error::NotFound) => Ok(None), - Err(e) => Err(e.into()), - } + let attester_key = AttesterKey::new(validator_index, target); + Ok(txn + .get(self.attesters_db, &attester_key) + .optional()? + .map(AttesterRecord::from_ssz_bytes) + .transpose()?) } pub fn get_block_proposal( @@ -223,11 +287,11 @@ impl SlasherDB { slot: Slot, ) -> Result, Error> { let proposer_key = ProposerKey::new(proposer_index, slot); - match txn.get(self.proposer_db, &proposer_key) { - Ok(bytes) => Ok(Some(SignedBeaconBlockHeader::from_ssz_bytes(bytes)?)), - Err(lmdb::Error::NotFound) => Ok(None), - Err(e) => Err(e.into()), - } + Ok(txn + .get(self.proposers_db, &proposer_key) + .optional()? + .map(SignedBeaconBlockHeader::from_ssz_bytes) + .transpose()?) } pub fn check_or_insert_block_proposal( @@ -251,7 +315,7 @@ impl SlasherDB { } } else { txn.put( - self.proposer_db, + self.proposers_db, &ProposerKey::new(proposer_index, slot), &block_header.as_ssz_bytes(), Self::write_flags(), @@ -259,6 +323,113 @@ impl SlasherDB { Ok(ProposerSlashingStatus::NotSlashable) } } + + pub fn prune(&self, current_epoch: Epoch) -> Result<(), Error> { + let mut txn = self.begin_rw_txn()?; + self.prune_proposers(current_epoch, &mut txn)?; + self.prune_attesters(current_epoch, &mut txn)?; + txn.commit()?; + Ok(()) + } + + fn prune_proposers( + &self, + current_epoch: Epoch, + txn: &mut RwTransaction<'_>, + ) -> Result<(), Error> { + let min_slot = current_epoch + .saturating_add(1u64) + .saturating_sub(self.config.history_length) + .start_slot(E::slots_per_epoch()); + + let mut cursor = txn.open_rw_cursor(self.proposers_db)?; + + // Position cursor at first key, bailing out if the database is empty. + match cursor.get(None, None, lmdb_sys::MDB_FIRST) { + Ok(_) => (), + Err(lmdb::Error::NotFound) => return Ok(()), + Err(e) => return Err(e.into()), + } + + loop { + let key_bytes = cursor + .get(None, None, lmdb_sys::MDB_GET_CURRENT)? + .0 + .ok_or_else(|| Error::MissingProposerKey)?; + + let (slot, _) = ProposerKey::parse(key_bytes)?; + if slot < min_slot { + cursor.del(Self::write_flags())?; + cursor.get(None, None, lmdb_sys::MDB_NEXT)?; + } else { + break; + } + } + + Ok(()) + } + + fn prune_attesters( + &self, + current_epoch: Epoch, + txn: &mut RwTransaction<'_>, + ) -> Result<(), Error> { + let min_epoch = current_epoch + .saturating_add(1u64) + .saturating_sub(self.config.history_length as u64); + + let mut cursor = txn.open_rw_cursor(self.attesters_db)?; + + // Position cursor at first key, bailing out if the database is empty. 
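// Two properties make the cursor-based pruning above work. First, both pruned tables
// use big-endian `(epoch-or-slot, validator_index)` keys, so LMDB's lexicographic key
// order is chronological and the walk from the first key can stop at the first entry
// that is new enough. Second, the cutoff retains exactly `history_length` epochs. A
// self-contained check of both (the key layout copies `ProposerKey::new`; the cutoff
// mirrors the `saturating_add(1).saturating_sub(history_length)` arithmetic above):
fn proposer_key(slot: u64, validator_index: u64) -> [u8; 16] {
    let mut key = [0u8; 16];
    key[..8].copy_from_slice(&slot.to_be_bytes());
    key[8..].copy_from_slice(&validator_index.to_be_bytes());
    key
}

fn min_retained_epoch(current_epoch: u64, history_length: u64) -> u64 {
    current_epoch.saturating_add(1).saturating_sub(history_length)
}

fn main() {
    // Big-endian slot bytes dominate the comparison, so a high validator index at an
    // earlier slot still sorts before a low validator index at a later slot.
    assert!(proposer_key(10, 999_999) < proposer_key(11, 0));
    // Little-endian encoding would break this (e.g. slot 256 would sort before 255).
    assert!(256u64.to_le_bytes() < 255u64.to_le_bytes());

    // With the default history_length of 54_000, pruning at epoch 60_000 retains
    // epochs 6_001..=60_000, i.e. exactly 54_000 epochs.
    let history_length = 54_000;
    assert_eq!(min_retained_epoch(100, history_length), 0);
    assert_eq!(min_retained_epoch(60_000, history_length), 6_001);
    assert_eq!(60_000 - min_retained_epoch(60_000, history_length) + 1, history_length);
}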
+ match cursor.get(None, None, lmdb_sys::MDB_FIRST) { + Ok(_) => (), + Err(lmdb::Error::NotFound) => return Ok(()), + Err(e) => return Err(e.into()), + } + + let mut indexed_attestations_to_delete = HashSet::new(); + + loop { + println!("Iterating..."); + let (optional_key, value) = cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT).unwrap(); + let key_bytes = optional_key.ok_or_else(|| Error::MissingAttesterKey)?; + + let (target_epoch, validator_index) = AttesterKey::parse(key_bytes)?; + + if target_epoch < min_epoch { + // Stage the indexed attestation for deletion and delete the record itself. + let attester_record = AttesterRecord::from_ssz_bytes(value)?; + indexed_attestations_to_delete.insert(attester_record.indexed_attestation_hash); + + println!( + "Deleting attestation for epoch {} from {}", + target_epoch, validator_index + ); + + cursor.del(Self::write_flags()).unwrap(); + // FIXME(sproul): abstract this pattern + match cursor.get(None, None, lmdb_sys::MDB_NEXT) { + Ok(_) => (), + Err(lmdb::Error::NotFound) => break, + Err(e) => return Err(e.into()), + } + } else { + break; + } + } + drop(cursor); + + for indexed_attestation_hash in indexed_attestations_to_delete { + println!( + "Deleting indexed attestation {:?}", + indexed_attestation_hash + ); + txn.del(self.indexed_attestation_db, &indexed_attestation_hash, None) + .expect("HELLO"); + } + + Ok(()) + } } // FIXME(sproul): consider using this to avoid allocations diff --git a/slasher/src/error.rs b/slasher/src/error.rs index 1a9e23ebf97..352ea668e26 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -23,9 +23,17 @@ pub enum Error { AttesterRecordCorrupt { length: usize, }, + AttesterKeyCorrupt { + length: usize, + }, + ProposerKeyCorrupt { + length: usize, + }, MissingIndexedAttestation { root: Hash256, }, + MissingAttesterKey, + MissingProposerKey, } impl From for Error { diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 0baf6bb8641..b7a8f6e2aef 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -9,6 +9,8 @@ mod database; mod error; mod slasher; mod slasher_server; +pub mod test_utils; +mod utils; pub use crate::slasher::Slasher; pub use attestation_queue::AttestationQueue; diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 2df5736daab..4b307b8aeb2 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -70,6 +70,7 @@ impl Slasher { let mut txn = self.db.begin_rw_txn()?; self.process_blocks(&mut txn)?; self.process_attestations(current_epoch, &mut txn)?; + self.db.update_current_epoch(current_epoch, &mut txn)?; txn.commit()?; Ok(()) } @@ -123,7 +124,7 @@ impl Slasher { // Dequeue attestations in batches and process them. for (subqueue_id, subqueue) in snapshot.subqueues.into_iter().enumerate() { - self.process_batch(txn, subqueue_id, subqueue.attestations, current_epoch); + self.process_batch(txn, subqueue_id, subqueue.attestations, current_epoch)?; } Ok(()) } @@ -135,7 +136,7 @@ impl Slasher { subqueue_id: usize, batch: Vec, AttesterRecord)>>, current_epoch: Epoch, - ) { + ) -> Result<(), Error> { // First, check for double votes. 
for attestation in &batch { match self.check_double_votes(txn, subqueue_id, &attestation.0, attestation.1) { @@ -155,6 +156,7 @@ impl Slasher { "Error checking for double votes"; "error" => format!("{:?}", e) ); + return Err(e); } } } @@ -184,8 +186,11 @@ impl Slasher { "Error processing array update"; "error" => format!("{:?}", e), ); + return Err(e); } } + + Ok(()) } /// Check for double votes from all validators on `attestation` who match the `subqueue_id`. @@ -227,4 +232,8 @@ impl Slasher { Ok(slashings) } + + pub fn prune_database(&self, current_epoch: Epoch) -> Result<(), Error> { + self.db.prune(current_epoch) + } } diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs new file mode 100644 index 00000000000..256710fc94a --- /dev/null +++ b/slasher/src/test_utils.rs @@ -0,0 +1,54 @@ +use slog::Logger; +use sloggers::Build; +use types::{ + AggregateSignature, AttestationData, AttesterSlashing, Checkpoint, Epoch, Hash256, + IndexedAttestation, MainnetEthSpec, Slot, +}; + +pub type E = MainnetEthSpec; + +pub fn logger() -> Logger { + if cfg!(feature = "test_logger") { + sloggers::terminal::TerminalLoggerBuilder::new() + .level(sloggers::types::Severity::Debug) + .build() + .unwrap() + } else { + sloggers::null::NullLoggerBuilder.build().unwrap() + } +} + +pub fn indexed_att( + attesting_indices: impl AsRef<[u64]>, + source_epoch: u64, + target_epoch: u64, + target_root: u64, +) -> IndexedAttestation { + IndexedAttestation { + attesting_indices: attesting_indices.as_ref().to_vec().into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(source_epoch), + root: Hash256::from_low_u64_be(0), + }, + target: Checkpoint { + epoch: Epoch::new(target_epoch), + root: Hash256::from_low_u64_be(target_root), + }, + }, + signature: AggregateSignature::empty(), + } +} + +pub fn att_slashing( + attestation_1: &IndexedAttestation, + attestation_2: &IndexedAttestation, +) -> AttesterSlashing { + AttesterSlashing { + attestation_1: attestation_1.clone(), + attestation_2: attestation_2.clone(), + } +} diff --git a/slasher/src/utils.rs b/slasher/src/utils.rs new file mode 100644 index 00000000000..b9df9b5b442 --- /dev/null +++ b/slasher/src/utils.rs @@ -0,0 +1,16 @@ +use crate::Error; + +/// Mix-in trait for loading values from LMDB that may or may not exist. 
+pub trait TxnOptional { + fn optional(self) -> Result, E>; +} + +impl TxnOptional for Result { + fn optional(self) -> Result, Error> { + match self { + Ok(x) => Ok(Some(x)), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e.into()), + } + } +} diff --git a/slasher/tests/slasher_tests.rs b/slasher/tests/attester_slashings.rs similarity index 80% rename from slasher/tests/slasher_tests.rs rename to slasher/tests/attester_slashings.rs index bb2738169ce..185b36b3f5a 100644 --- a/slasher/tests/slasher_tests.rs +++ b/slasher/tests/attester_slashings.rs @@ -1,48 +1,11 @@ use rayon::prelude::*; -use slasher::{config::DEFAULT_CHUNK_SIZE, Config, Slasher}; -use slog::{o, Drain, Logger}; -use tempdir::TempDir; -use types::{ - AggregateSignature, AttestationData, AttesterSlashing, Checkpoint, Epoch, Hash256, - IndexedAttestation, MainnetEthSpec, Slot, +use slasher::{ + config::DEFAULT_CHUNK_SIZE, + test_utils::{att_slashing, indexed_att, logger, E}, + Config, Slasher, }; - -type E = MainnetEthSpec; - -fn indexed_att( - attesting_indices: impl AsRef<[u64]>, - source_epoch: u64, - target_epoch: u64, - target_root: u64, -) -> IndexedAttestation { - IndexedAttestation { - attesting_indices: attesting_indices.as_ref().to_vec().into(), - data: AttestationData { - slot: Slot::new(0), - index: 0, - beacon_block_root: Hash256::zero(), - source: Checkpoint { - epoch: Epoch::new(source_epoch), - root: Hash256::from_low_u64_be(0), - }, - target: Checkpoint { - epoch: Epoch::new(target_epoch), - root: Hash256::from_low_u64_be(target_root), - }, - }, - signature: AggregateSignature::empty(), - } -} - -fn att_slashing( - attestation_1: &IndexedAttestation, - attestation_2: &IndexedAttestation, -) -> AttesterSlashing { - AttesterSlashing { - attestation_1: attestation_1.clone(), - attestation_2: attestation_2.clone(), - } -} +use tempdir::TempDir; +use types::{AttesterSlashing, Epoch, IndexedAttestation}; #[test] fn double_vote_single_val() { @@ -220,13 +183,6 @@ fn slasher_test_batch( slasher_test(attestations, expected, current_epoch, |_| false); } -// FIXME(sproul): move this somewhere else -fn logger() -> Logger { - let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); - let drain = slog_term::FullFormat::new(decorator).build(); - Logger::root(Box::new(std::sync::Mutex::new(drain)).fuse(), o!()) -} - fn slasher_test( attestations: &[IndexedAttestation], expected: &[AttesterSlashing], @@ -242,10 +198,10 @@ fn slasher_test( slasher.accept_attestation(attestation.clone()); if should_process_after(i) { - slasher.process_attestations(current_epoch).unwrap(); + slasher.process_queued(current_epoch).unwrap(); } } - slasher.process_attestations(current_epoch).unwrap(); + slasher.process_queued(current_epoch).unwrap(); let slashings = slasher.get_attester_slashings(); @@ -254,6 +210,9 @@ fn slasher_test( } assert_eq!(expected, &slashings[..]); + + // Pruning should not error. 
+ slasher.prune_database(current_epoch).unwrap(); } fn parallel_slasher_test( @@ -271,7 +230,7 @@ fn parallel_slasher_test( .into_par_iter() .try_for_each(|attestation| { slasher.accept_attestation(attestation.clone()); - slasher.process_attestations(current_epoch) + slasher.process_queued(current_epoch) }) .expect("parallel processing shouldn't race"); } diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs new file mode 100644 index 00000000000..5a8f60013af --- /dev/null +++ b/slasher/tests/proposer_slashings.rs @@ -0,0 +1,68 @@ +use slasher::{test_utils::logger, Config, Slasher}; +use tempdir::TempDir; +use types::{ + BeaconBlockHeader, Epoch, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlockHeader, + Slot, +}; + +type E = MainnetEthSpec; + +fn test_block(slot: u64, proposer_index: u64, block_root: u64) -> SignedBeaconBlockHeader { + SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot: Slot::new(slot), + proposer_index, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::from_low_u64_be(block_root), + }, + signature: Signature::empty(), + } +} + +#[test] +fn block_pruning() { + let slots_per_epoch = E::slots_per_epoch(); + + let tempdir = TempDir::new("slasher").unwrap(); + let mut config = Config::new(tempdir.path().into()); + config.chunk_size = 2; + config.history_length = 2; + + let slasher = Slasher::::open(config.clone(), logger()).unwrap(); + let current_epoch = Epoch::from(2 * config.history_length); + + // Pruning the empty database should be safe. + slasher.prune_database(Epoch::new(0)).unwrap(); + slasher.prune_database(current_epoch).unwrap(); + + // Add blocks in excess of the history length and prune them away. + let proposer_index = 100_000; // high to check sorting by slot + for slot in 1..=current_epoch.as_u64() * slots_per_epoch { + slasher.accept_block_header(test_block(slot, proposer_index, 0)); + } + slasher.process_queued(current_epoch).unwrap(); + slasher.prune_database(current_epoch).unwrap(); + + // Add more conflicting blocks, and check that only the ones within the non-pruned + // section are detected as slashable. + for slot in 1..=current_epoch.as_u64() * slots_per_epoch { + slasher.accept_block_header(test_block(slot, proposer_index, 1)); + } + slasher.process_queued(current_epoch).unwrap(); + + let proposer_slashings = slasher.get_proposer_slashings(); + + // Check number of proposer slashings, accounting for single block in current epoch. + assert_eq!( + proposer_slashings.len(), + (config.history_length - 1) * slots_per_epoch as usize + 1 + ); + // Check epochs of all slashings are from within range. 
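// Quick check of the count asserted above: with history_length = 2 and mainnet's 32
// slots per epoch, pruning leaves the whole of epoch 3 plus the single block the loop
// added at the first slot of epoch 4, so (2 - 1) * 32 + 1 = 33 double proposals are
// expected.
fn main() {
    let history_length = 2usize;
    let slots_per_epoch = 32usize;
    assert_eq!((history_length - 1) * slots_per_epoch + 1, 33);
}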
+ assert!(proposer_slashings.iter().all(|slashing| slashing + .signed_header_1 + .message + .slot + .epoch(slots_per_epoch) + > current_epoch - config.history_length as u64)); +} diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs new file mode 100644 index 00000000000..ed49f0303a1 --- /dev/null +++ b/slasher/tests/wrap_around.rs @@ -0,0 +1,47 @@ +use slasher::{ + test_utils::{indexed_att, logger}, + Config, Slasher, +}; +use tempdir::TempDir; +use types::Epoch; + +/* +#[test] +fn attestation_pruning_basic() { + unimplemented!() +} +*/ + +#[test] +fn attestation_pruning_empty_wrap_around() { + let tempdir = TempDir::new("slasher").unwrap(); + let mut config = Config::new(tempdir.path().into()); + config.validator_chunk_size = 1; + config.chunk_size = 16; + config.history_length = 16; + + let slasher = Slasher::open(config.clone(), logger()).unwrap(); + + let v = vec![0]; + let history_length = config.history_length as u64; + + let mut current_epoch = Epoch::new(history_length - 1); + + // FIXME(sproul): add bounds check that attestation isn't wider than history length + slasher.accept_attestation(indexed_att(v.clone(), 0, history_length - 1, 0)); + slasher.process_queued(current_epoch).unwrap(); + slasher.prune_database(current_epoch).unwrap(); + + // Delete the previous attestation + current_epoch = Epoch::new(2 * history_length + 2); + slasher.prune_database(current_epoch).unwrap(); + + // Add an attestation that would be surrounded with the modulo considered + slasher.accept_attestation(indexed_att( + v.clone(), + 2 * history_length - 3, + 2 * history_length - 2, + 1, + )); + slasher.process_queued(current_epoch).unwrap(); +} From 363a718334ab5fa85dcb8249b93a7adc31a2ad4e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 30 Oct 2020 12:50:49 +1100 Subject: [PATCH 19/34] Add time validation --- slasher/src/attestation_queue.rs | 113 ++++++++++++++----------------- slasher/src/attester_record.rs | 24 ++++++- slasher/src/lib.rs | 2 +- slasher/src/slasher.rs | 72 +++++++++++++++++--- slasher/src/slasher_server.rs | 4 +- 5 files changed, 140 insertions(+), 75 deletions(-) diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index d8442eefbd6..5f6fdf8a301 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -1,4 +1,4 @@ -use crate::AttesterRecord; +use crate::{AttesterRecord, Config}; use parking_lot::Mutex; use std::collections::BTreeSet; use std::sync::Arc; @@ -9,92 +9,83 @@ use types::{EthSpec, IndexedAttestation}; /// To be added to the database in batches, for efficiency and to prevent data races. #[derive(Debug)] pub struct AttestationQueue { - snapshot: Mutex>, - validators_per_chunk: usize, + /// All attestations (unique) for storage on disk. + pub queue: Mutex>, } +/// Attestations grouped by validator index range. #[derive(Debug)] -pub struct AttestationQueueSnapshot { - /// All attestations (unique) for storage on disk. - pub attestations_to_store: Vec, AttesterRecord)>>, - /// Attestations grouped by validator index range. - pub subqueues: Vec>, +pub struct GroupedAttestations { + pub subqueues: Vec>, } /// A queue of attestations for a range of validator indices. #[derive(Debug, Default)] -pub struct SubQueue { +pub struct AttestationBatch { pub attestations: Vec, AttesterRecord)>>, } -impl SubQueue { - /// Empty the queue. 
- pub fn take(&mut self) -> Self { - SubQueue { - attestations: std::mem::replace(&mut self.attestations, vec![]), - } - } - +impl AttestationBatch { pub fn len(&self) -> usize { self.attestations.len() } + + /// Group the attestations by validator index. + pub fn group_by_validator_index(self, config: &Config) -> GroupedAttestations { + let mut grouped_attestations = GroupedAttestations { subqueues: vec![] }; + + for attestation in self.attestations { + let subqueue_ids = attestation + .0 + .attesting_indices + .iter() + .map(|validator_index| config.validator_chunk_index(*validator_index)) + .collect::>(); + + if let Some(max_subqueue_id) = subqueue_ids.iter().next_back() { + if *max_subqueue_id >= grouped_attestations.subqueues.len() { + grouped_attestations + .subqueues + .resize_with(max_subqueue_id + 1, AttestationBatch::default); + } + } + + for subqueue_id in subqueue_ids { + grouped_attestations.subqueues[subqueue_id] + .attestations + .push(attestation.clone()); + } + } + + grouped_attestations + } } impl AttestationQueue { - pub fn new(validators_per_chunk: usize) -> Self { + pub fn new() -> Self { Self { - snapshot: Mutex::new(AttestationQueueSnapshot { - attestations_to_store: vec![], - subqueues: vec![], - }), - validators_per_chunk, + queue: Mutex::new(AttestationBatch::default()), } } - /// Add an attestation to all relevant queues, creating them if necessary. + /// Add an attestation to the queue. pub fn queue(&self, attestation: IndexedAttestation) { - // FIXME(sproul): this burdens the beacon node with extra hashing :\ let attester_record = AttesterRecord::from(attestation.clone()); + self.queue + .lock() + .attestations + .push(Arc::new((attestation, attester_record))); + } - let subqueue_ids = attestation - .attesting_indices - .iter() - .map(|validator_index| *validator_index as usize / self.validators_per_chunk) - .collect::>(); - - let arc_tuple = Arc::new((attestation, attester_record)); - - let mut snapshot = self.snapshot.lock(); - snapshot.attestations_to_store.push(arc_tuple.clone()); - - if let Some(max_subqueue_id) = subqueue_ids.iter().max() { - if *max_subqueue_id >= snapshot.subqueues.len() { - snapshot - .subqueues - .resize_with(max_subqueue_id + 1, SubQueue::default); - } - } - - for subqueue_id in subqueue_ids { - snapshot.subqueues[subqueue_id] - .attestations - .push(arc_tuple.clone()); - } + pub fn dequeue(&self) -> AttestationBatch { + std::mem::replace(&mut self.queue.lock(), AttestationBatch::default()) } - pub fn get_snapshot(&self) -> AttestationQueueSnapshot { - let mut snapshot = self.snapshot.lock(); - AttestationQueueSnapshot { - attestations_to_store: std::mem::replace(&mut snapshot.attestations_to_store, vec![]), - subqueues: snapshot.subqueues.iter_mut().map(SubQueue::take).collect(), - } + pub fn requeue(&self, batch: AttestationBatch) { + self.queue.lock().attestations.extend(batch.attestations); } - /// Return `(num_queues, num_attestations)`. 
- pub fn stats(&self) -> (usize, usize) { - let snapshot = self.snapshot.lock(); - let num_queues = snapshot.subqueues.len(); - let num_attestations = snapshot.subqueues.iter().map(SubQueue::len).sum(); - (num_queues, num_attestations) + pub fn len(&self) -> usize { + self.queue.lock().len() } } diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 742faa99ef2..82d5dc46f99 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -12,7 +12,7 @@ pub struct AttesterRecord { } #[derive(Debug, Clone, Encode, Decode, TreeHash)] -pub struct IndexedAttestationHeader { +struct IndexedAttestationHeader { pub attesting_indices: VariableList, pub data_root: Hash256, pub signature: AggregateSignature, @@ -33,3 +33,25 @@ impl From> for AttesterRecord { } } } + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::indexed_att; + + // Check correctness of fast hashing + #[test] + fn fast_hash() { + let data = vec![ + indexed_att(vec![], 0, 0, 0), + indexed_att(vec![1, 2, 3], 12, 14, 1), + indexed_att(vec![4], 0, 5, u64::MAX), + ]; + for att in data { + assert_eq!( + att.tree_hash_root(), + AttesterRecord::from(att).indexed_attestation_hash + ); + } + } +} diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index b7a8f6e2aef..375c16b13b9 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -13,7 +13,7 @@ pub mod test_utils; mod utils; pub use crate::slasher::Slasher; -pub use attestation_queue::AttestationQueue; +pub use attestation_queue::{AttestationBatch, AttestationQueue}; pub use attester_record::AttesterRecord; pub use block_queue::BlockQueue; pub use config::Config; diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 4b307b8aeb2..fd3bf9cc0cf 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -1,6 +1,6 @@ use crate::{ - array, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, ProposerSlashingStatus, - SlasherDB, + array, AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, + ProposerSlashingStatus, SlasherDB, }; use lmdb::{RwTransaction, Transaction}; use parking_lot::Mutex; @@ -30,7 +30,7 @@ impl Slasher { let db = SlasherDB::open(config.clone())?; let attester_slashings = Mutex::new(vec![]); let proposer_slashings = Mutex::new(vec![]); - let attestation_queue = AttestationQueue::new(config.validator_chunk_size); + let attestation_queue = AttestationQueue::new(); let block_queue = BlockQueue::new(); Ok(Self { db, @@ -106,15 +106,22 @@ impl Slasher { current_epoch: Epoch, txn: &mut RwTransaction<'_>, ) -> Result<(), Error> { - let snapshot = self.attestation_queue.get_snapshot(); + let snapshot = self.attestation_queue.dequeue(); + + // Filter attestations for relevance. + let (snapshot, deferred, num_dropped) = self.validate(snapshot, current_epoch); + let num_deferred = deferred.len(); + self.attestation_queue.requeue(deferred); // Insert attestations into database. debug!( self.log, - "Storing {} attestations in slasher DB", - snapshot.attestations_to_store.len() + "Storing attestations in slasher DB"; + "num_valid" => snapshot.len(), + "num_deferred" => num_deferred, + "num_dropped" => num_dropped, ); - for attestation in snapshot.attestations_to_store { + for attestation in snapshot.attestations.iter() { self.db.store_indexed_attestation( txn, attestation.1.indexed_attestation_hash, @@ -122,8 +129,9 @@ impl Slasher { )?; } - // Dequeue attestations in batches and process them. 
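
Aside on the new `group_by_validator_index` above: each attestation is pushed onto one subqueue per distinct validator chunk among its attesting indices. A minimal sketch of that mapping, assuming `Config::validator_chunk_index` is the same integer division by `validator_chunk_size` that the inlined code removed in this patch used:

    use std::collections::BTreeSet;

    /// Compute the set of subqueue ids an attestation belongs to, one per
    /// distinct validator chunk among its attesting indices.
    fn subqueue_ids(attesting_indices: &[u64], validator_chunk_size: usize) -> BTreeSet<usize> {
        attesting_indices
            .iter()
            .map(|validator_index| *validator_index as usize / validator_chunk_size)
            .collect()
    }

    fn main() {
        // With 256 validators per chunk, validators 5, 300 and 513 fall into
        // chunks 0, 1 and 2, so the attestation is cloned into three subqueues.
        let ids: Vec<usize> = subqueue_ids(&[5, 300, 513], 256).into_iter().collect();
        assert_eq!(ids, vec![0, 1, 2]);
    }
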
- for (subqueue_id, subqueue) in snapshot.subqueues.into_iter().enumerate() { + // Group attestations into batches and process them. + let grouped_attestations = snapshot.group_by_validator_index(&self.config); + for (subqueue_id, subqueue) in grouped_attestations.subqueues.into_iter().enumerate() { self.process_batch(txn, subqueue_id, subqueue.attestations, current_epoch)?; } Ok(()) @@ -233,6 +241,52 @@ impl Slasher { Ok(slashings) } + /// Validate the attestations in `batch` for ingestion during `current_epoch`. + /// + /// Drop any attestations that are too old to ever be relevant, and return any attestations + /// that might be valid in the future. + fn validate( + &self, + batch: AttestationBatch, + current_epoch: Epoch, + ) -> (AttestationBatch, AttestationBatch, usize) { + let mut keep = Vec::with_capacity(batch.len()); + let mut defer = vec![]; + let mut drop_count = 0; + + for tuple in batch.attestations.into_iter() { + let attestation = &tuple.0; + let target_epoch = attestation.data.target.epoch; + let source_epoch = attestation.data.source.epoch; + + // Check that the attestation doesn't span a distance greater than or equal to the + // history length, else it will cause wrap-around issues for us. + if source_epoch > target_epoch + || target_epoch - source_epoch >= self.config.history_length as u64 + { + drop_count += 1; + continue; + } + + // Check that the attestation's target epoch is acceptable, and defer it + // if it's not. + if target_epoch > current_epoch { + defer.push(tuple); + } else { + // Otherwise the attestation is OK to process. + keep.push(tuple); + } + } + + ( + AttestationBatch { attestations: keep }, + AttestationBatch { + attestations: defer, + }, + drop_count, + ) + } + pub fn prune_database(&self, current_epoch: Epoch) -> Result<(), Error> { self.db.prune(current_epoch) } diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 4393882c32e..ebf431bdd09 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -48,8 +48,7 @@ impl SlasherServer { move || { while let Ok(current_epoch) = receiver.recv() { let t = Instant::now(); - let (num_validator_chunks, num_attestations) = - slasher.attestation_queue.stats(); + let num_attestations = slasher.attestation_queue.len(); let num_blocks = slasher.block_queue.len(); if let Err(e) = slasher.process_queued(current_epoch) { error!( @@ -63,7 +62,6 @@ impl SlasherServer { "Completed slasher update"; "time_taken" => format!("{}ms", t.elapsed().as_millis()), "num_attestations" => num_attestations, - "num_validator_chunks" => num_validator_chunks, "num_blocks" => num_blocks, ); } From 19f073d0a5306d6d9b726cbfdeb9ba1320b5b967 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 30 Oct 2020 17:41:09 +1100 Subject: [PATCH 20/34] Initial fuzzing tests --- slasher/Cargo.toml | 3 + slasher/src/array.rs | 49 ++++------------ slasher/src/database.rs | 18 ++---- slasher/src/slasher.rs | 15 ++++- slasher/tests/random.rs | 123 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 153 insertions(+), 55 deletions(-) create mode 100644 slasher/tests/random.rs diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index cd15d210096..7e18668c8f9 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -33,6 +33,9 @@ criterion = "0.3" rayon = "1.3.0" tempdir = "0.3.7" +[features] +test_logger = [] + [[bench]] name = "blake2b" harness = false diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 062a7a221b0..7b9040bde42 100644 --- a/slasher/src/array.rs +++ 
b/slasher/src/array.rs @@ -1,7 +1,6 @@ use crate::{AttesterRecord, Config, Error, SlasherDB, SlashingStatus}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use lmdb::{RwTransaction, Transaction}; -use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use std::collections::{btree_map::Entry, BTreeMap}; use std::convert::TryFrom; @@ -138,11 +137,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn fn first_start_epoch(source_epoch: Epoch, current_epoch: Epoch) -> Option; - fn next_chunk_index_and_start_epoch( - chunk_index: usize, - start_epoch: Epoch, - config: &Config, - ) -> Result<(usize, Epoch), Error>; + fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch; fn select_db(db: &SlasherDB) -> lmdb::Database; @@ -256,14 +251,9 @@ impl TargetArrayChunk for MinTargetChunk { // We can stop. return Ok(false); } - if epoch == min_epoch { - return Ok(false); - } epoch -= 1; } - // Continue to the next chunk. - assert_ne!(chunk_index, 0); - Ok(true) + Ok(epoch > min_epoch) } fn first_start_epoch(source_epoch: Epoch, _current_epoch: Epoch) -> Option { @@ -274,16 +264,10 @@ impl TargetArrayChunk for MinTargetChunk { } } - fn next_chunk_index_and_start_epoch( - chunk_index: usize, - start_epoch: Epoch, - config: &Config, - ) -> Result<(usize, Epoch), Error> { + // Move to last epoch of previous chunk + fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch { let chunk_size = config.chunk_size as u64; - Ok(( - chunk_index.safe_sub(1)?, - start_epoch / chunk_size * chunk_size - 1, - )) + start_epoch / chunk_size * chunk_size - 1 } fn select_db(db: &SlasherDB) -> lmdb::Database { @@ -368,17 +352,10 @@ impl TargetArrayChunk for MaxTargetChunk { } } - // Go to next chunk, and first epoch of that chunk - fn next_chunk_index_and_start_epoch( - chunk_index: usize, - start_epoch: Epoch, - config: &Config, - ) -> Result<(usize, Epoch), Error> { + // Move to first epoch of next chunk + fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch { let chunk_size = config.chunk_size as u64; - Ok(( - chunk_index.safe_add(1)?, - (start_epoch / chunk_size + 1) * chunk_size, - )) + (start_epoch / chunk_size + 1) * chunk_size } fn select_db(db: &SlasherDB) -> lmdb::Database { @@ -445,9 +422,9 @@ pub fn apply_attestation_for_validator( } else { return Ok(slashing_status); }; - chunk_index = config.chunk_index(start_epoch); loop { + chunk_index = config.chunk_index(start_epoch); current_chunk = get_chunk_for_update( db, txn, @@ -467,11 +444,7 @@ pub fn apply_attestation_for_validator( if !keep_going { break; } - - let (next_chunk_index, next_start_epoch) = - T::next_chunk_index_and_start_epoch(chunk_index, start_epoch, config)?; - chunk_index = next_chunk_index; - start_epoch = next_start_epoch; + start_epoch = T::next_start_epoch(start_epoch, config); } Ok(SlashingStatus::NotSlashable) @@ -551,7 +524,7 @@ pub fn epoch_update_for_validator( )?; epoch += 1; } - chunk_index += 1; + chunk_index = config.chunk_index(epoch); } Ok(()) diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 14731df759f..72f5c179fb6 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -390,23 +390,18 @@ impl SlasherDB { let mut indexed_attestations_to_delete = HashSet::new(); loop { - println!("Iterating..."); - let (optional_key, value) = cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT).unwrap(); + let (optional_key, value) = cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT)?; let key_bytes = optional_key.ok_or_else(|| 
Error::MissingAttesterKey)?; - let (target_epoch, validator_index) = AttesterKey::parse(key_bytes)?; + let (target_epoch, _validator_index) = AttesterKey::parse(key_bytes)?; if target_epoch < min_epoch { // Stage the indexed attestation for deletion and delete the record itself. let attester_record = AttesterRecord::from_ssz_bytes(value)?; indexed_attestations_to_delete.insert(attester_record.indexed_attestation_hash); - println!( - "Deleting attestation for epoch {} from {}", - target_epoch, validator_index - ); + cursor.del(Self::write_flags())?; - cursor.del(Self::write_flags()).unwrap(); // FIXME(sproul): abstract this pattern match cursor.get(None, None, lmdb_sys::MDB_NEXT) { Ok(_) => (), @@ -420,12 +415,7 @@ impl SlasherDB { drop(cursor); for indexed_attestation_hash in indexed_attestations_to_delete { - println!( - "Deleting indexed attestation {:?}", - indexed_attestation_hash - ); - txn.del(self.indexed_attestation_db, &indexed_attestation_hash, None) - .expect("HELLO"); + txn.del(self.indexed_attestation_db, &indexed_attestation_hash, None)?; } Ok(()) diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index fd3bf9cc0cf..14066365e59 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -121,6 +121,12 @@ impl Slasher { "num_deferred" => num_deferred, "num_dropped" => num_dropped, ); + eprintln!( + "valid: {}, deferred: {}, dropped: {}", + snapshot.len(), + num_deferred, + num_dropped + ); for attestation in snapshot.attestations.iter() { self.db.store_indexed_attestation( txn, @@ -259,11 +265,10 @@ impl Slasher { let target_epoch = attestation.data.target.epoch; let source_epoch = attestation.data.source.epoch; - // Check that the attestation doesn't span a distance greater than or equal to the - // history length, else it will cause wrap-around issues for us. if source_epoch > target_epoch - || target_epoch - source_epoch >= self.config.history_length as u64 + || source_epoch <= current_epoch - self.config.history_length as u64 { + eprintln!("dropping {}=>{}", source_epoch, target_epoch); drop_count += 1; continue; } @@ -271,9 +276,11 @@ impl Slasher { // Check that the attestation's target epoch is acceptable, and defer it // if it's not. if target_epoch > current_epoch { + eprintln!("deferring {}=>{}", source_epoch, target_epoch); defer.push(tuple); } else { // Otherwise the attestation is OK to process. + eprintln!("processing {}=>{}", source_epoch, target_epoch); keep.push(tuple); } } @@ -287,6 +294,8 @@ impl Slasher { ) } + /// Must only be called after `process_queued(current_epoch)`. 
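
Aside on the relevance check in `validate` above: with unsigned epochs, `source_epoch <= current_epoch - history_length` can underflow whenever `current_epoch < history_length`; a later patch in this series rearranges it as `source_epoch + history_length <= current_epoch`. A small sketch over plain `u64`s (not the `Epoch` newtype) showing the additive form is safe and agrees with the subtractive one whenever the subtraction is defined:

    /// Is the attestation's source epoch too old to matter, given the pruning window?
    /// Written additively to avoid unsigned underflow when `current_epoch < history_length`.
    fn is_too_old(source_epoch: u64, current_epoch: u64, history_length: u64) -> bool {
        source_epoch + history_length <= current_epoch
    }

    fn main() {
        // Early in the chain the subtractive form would underflow; the additive
        // form simply reports "not too old".
        assert!(!is_too_old(0, 3, 16));
        // Well past the window, old attestations get dropped.
        assert!(is_too_old(10, 100, 16));
        // When the subtraction is safe, the two forms agree.
        assert_eq!(is_too_old(84, 100, 16), 84 <= 100 - 16);
    }
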
+ // FIXME(sproul): consider checking this condition pub fn prune_database(&self, current_epoch: Epoch) -> Result<(), Error> { self.db.prune(current_epoch) } diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs new file mode 100644 index 00000000000..ebb41157c14 --- /dev/null +++ b/slasher/tests/random.rs @@ -0,0 +1,123 @@ +use rand::prelude::*; +use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; +use slasher::{ + test_utils::{indexed_att, logger, E}, + Config, Slasher, +}; +use std::cmp::max; +use tempdir::TempDir; +use types::Epoch; + +fn random_test(seed: u64, check_slashings: bool) { + let num_validators = 4_usize; + let max_attestations = 50; + + eprintln!("Running with seed {}", seed); + let mut rng = StdRng::seed_from_u64(seed); + + let tempdir = TempDir::new("slasher").unwrap(); + + let mut config = Config::new(tempdir.path().into()); + config.validator_chunk_size = 1 << rng.gen_range(1, 4); + + let chunk_size_exponent = rng.gen_range(1, 4); + config.chunk_size = 1 << chunk_size_exponent; + config.history_length = 1 << rng.gen_range(chunk_size_exponent, chunk_size_exponent + 3); + + eprintln!("History length: {}", config.history_length); + + let slasher = Slasher::::open(config.clone(), logger()).unwrap(); + + let validators = (0..num_validators as u64).collect::>(); + + let num_attestations = rng.gen_range(2, max_attestations + 1); + + let mut current_epoch = Epoch::new(0); + let mut attestations = vec![]; + + for _ in 0..num_attestations { + let num_attesters = rng.gen_range(1, num_validators); + let mut attesting_indices = validators + .choose_multiple(&mut rng, num_attesters) + .copied() + .collect::>(); + attesting_indices.sort(); + + // If checking slashings, generate valid attestations in range. + let (source, target) = if check_slashings { + let source = rng.gen_range( + current_epoch.as_u64() - config.history_length as u64 + 1, + current_epoch.as_u64() + 1, + ); + let target = rng.gen_range(source, current_epoch.as_u64() + 1); + (source, target) + } else { + let source = rng.gen_range(0, max(3 * current_epoch.as_u64(), 1)); + let target = rng.gen_range(source, max(3 * current_epoch.as_u64(), source + 1)); + (source, target) + }; + let target_root = rng.gen_range(0, 3); + let attestation = indexed_att(&attesting_indices, source, target, target_root); + + eprintln!( + "Attestation {}=>{} from {:?} for root {}", + source, target, attesting_indices, target_root + ); + + if check_slashings { + attestations.push(attestation.clone()); + } + + // Supply to slasher + slasher.accept_attestation(attestation); + + // Maybe process + if rng.gen_bool(0.1) { + eprintln!("Processing {}", current_epoch); + slasher.process_queued(current_epoch).unwrap(); + + // Maybe prune + if rng.gen_bool(0.1) { + eprintln!("Pruning at epoch {}", current_epoch); + slasher.prune_database(current_epoch).unwrap(); + } + } + + // Maybe advance to the next epoch + if rng.gen_bool(0.5) { + if check_slashings { + slasher.process_queued(current_epoch).unwrap(); + } + current_epoch += 1; + } + } + + if !check_slashings { + return; + } + + slasher.process_queued(current_epoch).unwrap(); + + let _slashings = slasher.get_attester_slashings(); +} + +#[test] +fn no_crash() { + let mut rng = thread_rng(); + loop { + random_test(rng.gen(), false); + } +} + +#[test] +fn check_slashings() { + let mut rng = thread_rng(); + loop { + random_test(rng.gen(), true); + } +} + +#[test] +fn problem() { + random_test(6950630541455174723, false); +} From f74346fd8985c7a2fb7f2deacb4b5a310cf15b35 Mon Sep 17 00:00:00 2001 
From: Michael Sproul Date: Mon, 2 Nov 2020 18:04:41 +1100 Subject: [PATCH 21/34] No moar crashes --- slasher/src/array.rs | 111 +++++++++++++++++++++++++++++++--------- slasher/src/database.rs | 41 ++++++++++++--- slasher/src/slasher.rs | 8 ++- slasher/tests/random.rs | 10 +++- 4 files changed, 135 insertions(+), 35 deletions(-) diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 7b9040bde42..8981b7dd960 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -135,7 +135,11 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result; - fn first_start_epoch(source_epoch: Epoch, current_epoch: Epoch) -> Option; + fn first_start_epoch( + source_epoch: Epoch, + current_epoch: Epoch, + config: &Config, + ) -> Option; fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch; @@ -216,6 +220,11 @@ impl TargetArrayChunk for MinTargetChunk { self.chunk .get_target(validator_index, attestation.data.source.epoch, config)?; if attestation.data.target.epoch > min_target { + eprintln!("Min target chunk: {:?}", self); + eprintln!( + "Attestation: {}=>{}", + attestation.data.source.epoch, attestation.data.target.epoch + ); let attestation = db .get_attestation_for_validator(txn, validator_index, min_target)? .ok_or_else(|| Error::MissingAttesterRecord { @@ -243,8 +252,17 @@ impl TargetArrayChunk for MinTargetChunk { .saturating_sub(config.history_length - 1), ); let mut epoch = start_epoch; - while config.chunk_index(epoch) == chunk_index { + while config.chunk_index(epoch) == chunk_index && epoch >= min_epoch { if new_target_epoch < self.chunk.get_target(validator_index, epoch, config)? { + if validator_index == 0 + && Chunk::epoch_distance(new_target_epoch, epoch)? == 4 + && epoch.as_u64() % 8 == 1 + { + println!( + "SETTING DISTANCE FOR EPOCH {} TO {} AT {}", + epoch, new_target_epoch, current_epoch + ); + } self.chunk .set_target(validator_index, epoch, new_target_epoch, config)?; } else { @@ -256,8 +274,14 @@ impl TargetArrayChunk for MinTargetChunk { Ok(epoch > min_epoch) } - fn first_start_epoch(source_epoch: Epoch, _current_epoch: Epoch) -> Option { - if source_epoch > 0 { + // FIXME(sproul): fix modulo behaviour + fn first_start_epoch( + source_epoch: Epoch, + current_epoch: Epoch, + config: &Config, + ) -> Option { + if source_epoch > current_epoch - config.history_length as u64 { + assert_ne!(source_epoch, 0); Some(source_epoch - 1) } else { None @@ -307,6 +331,11 @@ impl TargetArrayChunk for MaxTargetChunk { self.chunk .get_target(validator_index, attestation.data.source.epoch, config)?; if attestation.data.target.epoch < max_target { + eprintln!("Max target chunk: {:?}", self); + eprintln!( + "Attestation: {}=>{}", + attestation.data.source.epoch, attestation.data.target.epoch + ); let attestation = db .get_attestation_for_validator(txn, validator_index, max_target)? .ok_or_else(|| Error::MissingAttesterRecord { @@ -329,7 +358,7 @@ impl TargetArrayChunk for MaxTargetChunk { config: &Config, ) -> Result { let mut epoch = start_epoch; - while config.chunk_index(epoch) == chunk_index { + while config.chunk_index(epoch) == chunk_index && epoch <= current_epoch { if new_target_epoch > self.chunk.get_target(validator_index, epoch, config)? 
{ self.chunk .set_target(validator_index, epoch, new_target_epoch, config)?; @@ -344,7 +373,11 @@ impl TargetArrayChunk for MaxTargetChunk { Ok(epoch <= current_epoch) } - fn first_start_epoch(source_epoch: Epoch, current_epoch: Epoch) -> Option { + fn first_start_epoch( + source_epoch: Epoch, + current_epoch: Epoch, + _config: &Config, + ) -> Option { if source_epoch < current_epoch { Some(source_epoch + 1) } else { @@ -416,7 +449,7 @@ pub fn apply_attestation_for_validator( } let mut start_epoch = if let Some(start_epoch) = - T::first_start_epoch(attestation.data.source.epoch, current_epoch) + T::first_start_epoch(attestation.data.source.epoch, current_epoch, config) { start_epoch } else { @@ -485,6 +518,14 @@ pub fn update( current_epoch, config, )?); + + // Update all current epochs. + for validator_index in validator_chunk_index * config.validator_chunk_size + ..(validator_chunk_index + 1) * config.validator_chunk_size + { + db.update_current_epoch(validator_index as u64, current_epoch, txn)?; + } + Ok(slashings) } @@ -497,16 +538,29 @@ pub fn epoch_update_for_validator( current_epoch: Epoch, config: &Config, ) -> Result<(), Error> { - let previous_current_epoch = if let Some(epoch) = db.get_stored_current_epoch(txn)? { - epoch - } else { - return Ok(()); - }; + let previous_current_epoch = + if let Some(epoch) = db.get_stored_current_epoch(validator_index, txn)? { + epoch + } else { + return Ok(()); + }; + + let debug_index = 0; + + if validator_index == debug_index { + eprintln!( + "Doing epoch update for {} at {} from {}", + validator_index, current_epoch, previous_current_epoch + ); + } - let mut chunk_index = config.chunk_index(previous_current_epoch); let mut epoch = previous_current_epoch; while epoch <= current_epoch { + if validator_index == debug_index { + eprintln!("Starting iter with epoch {}", epoch); + } + let chunk_index = config.chunk_index(epoch); let current_chunk = get_chunk_for_update( db, txn, @@ -516,6 +570,13 @@ pub fn epoch_update_for_validator( config, )?; while config.chunk_index(epoch) == chunk_index && epoch <= current_epoch { + if validator_index == debug_index { + eprintln!( + "Setting distance at epoch {} to {}", + epoch, + T::neutral_element() + ); + } current_chunk.chunk().set_raw_distance( validator_index, epoch, @@ -524,7 +585,6 @@ pub fn epoch_update_for_validator( )?; epoch += 1; } - chunk_index = config.chunk_index(epoch); } Ok(()) @@ -542,20 +602,25 @@ pub fn update_array( // Map from chunk index to updated chunk at that index. let mut updated_chunks = BTreeMap::new(); + for validator_index in validator_chunk_index * config.validator_chunk_size + ..(validator_chunk_index + 1) * config.validator_chunk_size + { + epoch_update_for_validator( + db, + txn, + &mut updated_chunks, + validator_chunk_index, + validator_index as u64, + current_epoch, + config, + )?; + } + for attestations in chunk_attestations.values() { for attestation in attestations { for validator_index in config.attesting_validators_for_chunk(&attestation.0, validator_chunk_index) { - epoch_update_for_validator( - db, - txn, - &mut updated_chunks, - validator_chunk_index, - validator_index, - current_epoch, - config, - )?; let slashing_status = apply_attestation_for_validator::( db, txn, diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 72f5c179fb6..2fce2efaf70 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -12,18 +12,22 @@ use types::{ }; /// Map from `(target_epoch, validator_index)` to `AttesterRecord`. 
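
Stepping back to the chunk-walking loops in `array.rs` above: the walk advances between chunks using the `next_start_epoch` helpers introduced in the previous patch, which for the min-target array jump to the last epoch of the previous chunk and for the max-target array to the first epoch of the next chunk. A worked example of that arithmetic over plain `u64`s:

    /// Last epoch of the chunk preceding the one containing `start_epoch`
    /// (the min-target walk moves backwards through the chunks).
    fn min_next_start_epoch(start_epoch: u64, chunk_size: u64) -> u64 {
        start_epoch / chunk_size * chunk_size - 1
    }

    /// First epoch of the chunk following the one containing `start_epoch`
    /// (the max-target walk moves forwards through the chunks).
    fn max_next_start_epoch(start_epoch: u64, chunk_size: u64) -> u64 {
        (start_epoch / chunk_size + 1) * chunk_size
    }

    fn main() {
        let chunk_size = 16;
        // Epoch 35 lies in the chunk covering epochs 32..=47.
        assert_eq!(min_next_start_epoch(35, chunk_size), 31); // previous chunk is 16..=31
        assert_eq!(max_next_start_epoch(35, chunk_size), 48); // next chunk is 48..=63
    }
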
+// FIXME(sproul): rename to `attesters_DB` const ATTESTER_DB: &str = "attesters"; /// Map from `indexed_attestation_hash` to `IndexedAttestation`. const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; const MIN_TARGETS_DB: &str = "min_targets"; const MAX_TARGETS_DB: &str = "max_targets"; +/// Map from `validator_index` to the `current_epoch` stored for that validator's min and max +/// target arrays. +const CURRENT_EPOCHS_DB: &str = "current_epochs"; /// Map from `(slot, validator_index)` to `SignedBeaconBlockHeader`. const PROPOSER_DB: &str = "proposers"; /// Metadata about the slashing database itself. const METADATA_DB: &str = "metadata"; /// The number of DBs for LMDB to use (equal to the number of DBs defined above). -const LMDB_MAX_DBS: u32 = 6; +const LMDB_MAX_DBS: u32 = 7; /// The size of the in-memory map for LMDB (larger than the maximum size of the database). // FIXME(sproul): make this user configurable const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB @@ -31,8 +35,6 @@ const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB const ATTESTER_KEY_SIZE: usize = 16; const PROPOSER_KEY_SIZE: usize = 16; -const METADATA_CURRENT_EPOCH_KEY: &'static [u8] = &[0]; - #[derive(Debug)] pub struct SlasherDB { pub(crate) env: Environment, @@ -40,6 +42,7 @@ pub struct SlasherDB { pub(crate) attesters_db: Database, pub(crate) min_targets_db: Database, pub(crate) max_targets_db: Database, + pub(crate) current_epochs_db: Database, pub(crate) proposers_db: Database, pub(crate) metadata_db: Database, config: Arc, @@ -114,6 +117,25 @@ impl AsRef<[u8]> for ProposerKey { } } +/// Key containing a validator index +pub struct CurrentEpochKey { + validator_index: [u8; 8], +} + +impl CurrentEpochKey { + pub fn new(validator_index: u64) -> Self { + Self { + validator_index: validator_index.to_be_bytes(), + } + } +} + +impl AsRef<[u8]> for CurrentEpochKey { + fn as_ref(&self) -> &[u8] { + &self.validator_index + } +} + impl SlasherDB { pub fn open(config: Arc) -> Result { // TODO: open_with_permissions @@ -127,6 +149,7 @@ impl SlasherDB { let attesters_db = env.create_db(Some(ATTESTER_DB), Self::db_flags())?; let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; + let current_epochs_db = env.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; let proposers_db = env.create_db(Some(PROPOSER_DB), Self::db_flags())?; let metadata_db = env.create_db(Some(METADATA_DB), Self::db_flags())?; Ok(Self { @@ -135,6 +158,7 @@ impl SlasherDB { attesters_db, min_targets_db, max_targets_db, + current_epochs_db, proposers_db, metadata_db, config, @@ -157,10 +181,14 @@ impl SlasherDB { // FIXME(sproul): rename pub fn get_stored_current_epoch( &self, + validator_index: u64, txn: &mut RwTransaction<'_>, ) -> Result, Error> { Ok(txn - .get(self.metadata_db, &METADATA_CURRENT_EPOCH_KEY) + .get( + self.current_epochs_db, + &CurrentEpochKey::new(validator_index), + ) .optional()? .map(Epoch::from_ssz_bytes) .transpose()?) 
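
The new `CurrentEpochKey` stores the validator index as fixed-width big-endian bytes; big-endian keys keep LMDB's byte-wise key comparison consistent with numeric ordering, the property that ordered cursor scans (such as the pruning pass over the attesters database) depend on. A standalone sketch of that property, using only the standard library:

    /// Big-endian byte encodings sort in the same order as the integers they
    /// encode, which is what makes fixed-width `to_be_bytes` keys safe to
    /// iterate in order with an LMDB cursor.
    fn main() {
        let indices: Vec<u64> = vec![3, 256, 1, 70_000, 2];

        // Encode each index as its 8-byte big-endian representation and sort
        // the raw keys, as LMDB would.
        let mut keys: Vec<[u8; 8]> = indices.iter().map(|i| i.to_be_bytes()).collect();
        keys.sort();

        // Decoding gives back the numerically sorted indices.
        let decoded: Vec<u64> = keys.iter().map(|k| u64::from_be_bytes(*k)).collect();
        let mut expected = indices;
        expected.sort();
        assert_eq!(decoded, expected);
    }
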
@@ -168,12 +196,13 @@ impl SlasherDB { pub fn update_current_epoch( &self, + validator_index: u64, current_epoch: Epoch, txn: &mut RwTransaction<'_>, ) -> Result<(), Error> { txn.put( - self.metadata_db, - &METADATA_CURRENT_EPOCH_KEY, + self.current_epochs_db, + &CurrentEpochKey::new(validator_index), ¤t_epoch.as_ssz_bytes(), Self::write_flags(), )?; diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 14066365e59..e68555e2a50 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -70,7 +70,6 @@ impl Slasher { let mut txn = self.db.begin_rw_txn()?; self.process_blocks(&mut txn)?; self.process_attestations(current_epoch, &mut txn)?; - self.db.update_current_epoch(current_epoch, &mut txn)?; txn.commit()?; Ok(()) } @@ -121,12 +120,14 @@ impl Slasher { "num_deferred" => num_deferred, "num_dropped" => num_dropped, ); + /* eprintln!( "valid: {}, deferred: {}, dropped: {}", snapshot.len(), num_deferred, num_dropped ); + */ for attestation in snapshot.attestations.iter() { self.db.store_indexed_attestation( txn, @@ -266,9 +267,8 @@ impl Slasher { let source_epoch = attestation.data.source.epoch; if source_epoch > target_epoch - || source_epoch <= current_epoch - self.config.history_length as u64 + || source_epoch + self.config.history_length as u64 <= current_epoch { - eprintln!("dropping {}=>{}", source_epoch, target_epoch); drop_count += 1; continue; } @@ -276,11 +276,9 @@ impl Slasher { // Check that the attestation's target epoch is acceptable, and defer it // if it's not. if target_epoch > current_epoch { - eprintln!("deferring {}=>{}", source_epoch, target_epoch); defer.push(tuple); } else { // Otherwise the attestation is OK to process. - eprintln!("processing {}=>{}", source_epoch, target_epoch); keep.push(tuple); } } diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index ebb41157c14..532a42eea16 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -20,10 +20,13 @@ fn random_test(seed: u64, check_slashings: bool) { let mut config = Config::new(tempdir.path().into()); config.validator_chunk_size = 1 << rng.gen_range(1, 4); + println!("Validator chunk size: {}", config.validator_chunk_size); + let chunk_size_exponent = rng.gen_range(1, 4); config.chunk_size = 1 << chunk_size_exponent; config.history_length = 1 << rng.gen_range(chunk_size_exponent, chunk_size_exponent + 3); + eprintln!("Chunk size: {}", config.chunk_size); eprintln!("History length: {}", config.history_length); let slasher = Slasher::::open(config.clone(), logger()).unwrap(); @@ -119,5 +122,10 @@ fn check_slashings() { #[test] fn problem() { - random_test(6950630541455174723, false); + random_test(2064946994010930548, false); +} + +#[test] +fn problem2() { + random_test(10684284558065464334, false); } From 7c55bf7351e6ea82401b0c067519925ca0f4f9f3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 4 Nov 2020 15:39:34 +1100 Subject: [PATCH 22/34] Seemingly stable --- slasher/src/array.rs | 92 +++++++++------------------ slasher/src/database.rs | 34 +++++----- slasher/src/lib.rs | 28 ++++++--- slasher/src/slasher.rs | 2 +- slasher/src/test_utils.rs | 2 +- slasher/tests/random.rs | 128 +++++++++++++++++++++++++++++++++----- 6 files changed, 183 insertions(+), 103 deletions(-) diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 8981b7dd960..a043359d4d5 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,4 +1,4 @@ -use crate::{AttesterRecord, Config, Error, SlasherDB, SlashingStatus}; +use crate::{AttesterRecord, AttesterSlashingStatus, 
Config, Error, SlasherDB}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use lmdb::{RwTransaction, Transaction}; use serde_derive::{Deserialize, Serialize}; @@ -123,7 +123,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn validator_index: u64, attestation: &IndexedAttestation, config: &Config, - ) -> Result, Error>; + ) -> Result, Error>; fn update( &mut self, @@ -215,25 +215,23 @@ impl TargetArrayChunk for MinTargetChunk { validator_index: u64, attestation: &IndexedAttestation, config: &Config, - ) -> Result, Error> { + ) -> Result, Error> { let min_target = self.chunk .get_target(validator_index, attestation.data.source.epoch, config)?; if attestation.data.target.epoch > min_target { - eprintln!("Min target chunk: {:?}", self); - eprintln!( - "Attestation: {}=>{}", - attestation.data.source.epoch, attestation.data.target.epoch - ); - let attestation = db - .get_attestation_for_validator(txn, validator_index, min_target)? - .ok_or_else(|| Error::MissingAttesterRecord { - validator_index, - target_epoch: min_target, - })?; - Ok(SlashingStatus::SurroundsExisting(Box::new(attestation))) + let existing_attestation = + db.get_attestation_for_validator(txn, validator_index, min_target)?; + + if attestation.data.source.epoch < existing_attestation.data.source.epoch { + Ok(AttesterSlashingStatus::SurroundsExisting(Box::new( + existing_attestation, + ))) + } else { + Ok(AttesterSlashingStatus::AlreadyDoubleVoted) + } } else { - Ok(SlashingStatus::NotSlashable) + Ok(AttesterSlashingStatus::NotSlashable) } } @@ -254,15 +252,6 @@ impl TargetArrayChunk for MinTargetChunk { let mut epoch = start_epoch; while config.chunk_index(epoch) == chunk_index && epoch >= min_epoch { if new_target_epoch < self.chunk.get_target(validator_index, epoch, config)? { - if validator_index == 0 - && Chunk::epoch_distance(new_target_epoch, epoch)? == 4 - && epoch.as_u64() % 8 == 1 - { - println!( - "SETTING DISTANCE FOR EPOCH {} TO {} AT {}", - epoch, new_target_epoch, current_epoch - ); - } self.chunk .set_target(validator_index, epoch, new_target_epoch, config)?; } else { @@ -271,7 +260,7 @@ impl TargetArrayChunk for MinTargetChunk { } epoch -= 1; } - Ok(epoch > min_epoch) + Ok(epoch >= min_epoch) } // FIXME(sproul): fix modulo behaviour @@ -326,25 +315,23 @@ impl TargetArrayChunk for MaxTargetChunk { validator_index: u64, attestation: &IndexedAttestation, config: &Config, - ) -> Result, Error> { + ) -> Result, Error> { let max_target = self.chunk .get_target(validator_index, attestation.data.source.epoch, config)?; if attestation.data.target.epoch < max_target { - eprintln!("Max target chunk: {:?}", self); - eprintln!( - "Attestation: {}=>{}", - attestation.data.source.epoch, attestation.data.target.epoch - ); - let attestation = db - .get_attestation_for_validator(txn, validator_index, max_target)? 
- .ok_or_else(|| Error::MissingAttesterRecord { - validator_index, - target_epoch: max_target, - })?; - Ok(SlashingStatus::SurroundedByExisting(Box::new(attestation))) + let existing_attestation = + db.get_attestation_for_validator(txn, validator_index, max_target)?; + + if existing_attestation.data.source.epoch < attestation.data.source.epoch { + Ok(AttesterSlashingStatus::SurroundedByExisting(Box::new( + existing_attestation, + ))) + } else { + Ok(AttesterSlashingStatus::AlreadyDoubleVoted) + } } else { - Ok(SlashingStatus::NotSlashable) + Ok(AttesterSlashingStatus::NotSlashable) } } @@ -429,7 +416,7 @@ pub fn apply_attestation_for_validator( attestation: &IndexedAttestation, current_epoch: Epoch, config: &Config, -) -> Result, Error> { +) -> Result, Error> { let mut chunk_index = config.chunk_index(attestation.data.source.epoch); let mut current_chunk = get_chunk_for_update( db, @@ -444,7 +431,7 @@ pub fn apply_attestation_for_validator( current_chunk.check_slashable(db, txn, validator_index, attestation, config)?; // TODO: consider removing this early return and updating the array - if slashing_status != SlashingStatus::NotSlashable { + if slashing_status != AttesterSlashingStatus::NotSlashable { return Ok(slashing_status); } @@ -480,7 +467,7 @@ pub fn apply_attestation_for_validator( start_epoch = T::next_start_epoch(start_epoch, config); } - Ok(SlashingStatus::NotSlashable) + Ok(AttesterSlashingStatus::NotSlashable) } pub fn update( @@ -545,21 +532,9 @@ pub fn epoch_update_for_validator( return Ok(()); }; - let debug_index = 0; - - if validator_index == debug_index { - eprintln!( - "Doing epoch update for {} at {} from {}", - validator_index, current_epoch, previous_current_epoch - ); - } - let mut epoch = previous_current_epoch; while epoch <= current_epoch { - if validator_index == debug_index { - eprintln!("Starting iter with epoch {}", epoch); - } let chunk_index = config.chunk_index(epoch); let current_chunk = get_chunk_for_update( db, @@ -570,13 +545,6 @@ pub fn epoch_update_for_validator( config, )?; while config.chunk_index(epoch) == chunk_index && epoch <= current_epoch { - if validator_index == debug_index { - eprintln!( - "Setting distance at epoch {} to {}", - epoch, - T::neutral_element() - ); - } current_chunk.chunk().set_raw_distance( validator_index, epoch, diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 2fce2efaf70..f7bbaf87e6f 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,5 +1,6 @@ use crate::{ - utils::TxnOptional, AttesterRecord, Config, Error, ProposerSlashingStatus, SlashingStatus, + utils::TxnOptional, AttesterRecord, AttesterSlashingStatus, Config, Error, + ProposerSlashingStatus, }; use byteorder::{BigEndian, ByteOrder}; use lmdb::{Cursor, Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; @@ -246,7 +247,7 @@ impl SlasherDB { validator_index: u64, attestation: &IndexedAttestation, record: AttesterRecord, - ) -> Result, Error> { + ) -> Result, Error> { // See if there's an existing attestation for this attester. if let Some(existing_record) = self.get_attester_record(txn, validator_index, attestation.data.target.epoch)? @@ -254,17 +255,19 @@ impl SlasherDB { // If the existing attestation data is identical, then this attestation is not // slashable and no update is required. 
if existing_record.attestation_data_hash == record.attestation_data_hash { - return Ok(SlashingStatus::NotSlashable); + return Ok(AttesterSlashingStatus::NotSlashable); } // Otherwise, load the indexed attestation so we can confirm that it's slashable. let existing_attestation = self.get_indexed_attestation(txn, existing_record.indexed_attestation_hash)?; if attestation.is_double_vote(&existing_attestation) { - Ok(SlashingStatus::DoubleVote(Box::new(existing_attestation))) + Ok(AttesterSlashingStatus::DoubleVote(Box::new( + existing_attestation, + ))) } else { // FIXME(sproul): this could be an Err - Ok(SlashingStatus::NotSlashable) + Ok(AttesterSlashingStatus::NotSlashable) } } // If no attestation exists, insert a record for this validator. @@ -275,7 +278,7 @@ impl SlasherDB { &record.as_ssz_bytes(), Self::write_flags(), )?; - Ok(SlashingStatus::NotSlashable) + Ok(AttesterSlashingStatus::NotSlashable) } } @@ -283,16 +286,15 @@ impl SlasherDB { &self, txn: &mut RwTransaction<'_>, validator_index: u64, - target: Epoch, - ) -> Result>, Error> { - if let Some(record) = self.get_attester_record(txn, validator_index, target)? { - Ok(Some(self.get_indexed_attestation( - txn, - record.indexed_attestation_hash, - )?)) - } else { - Ok(None) - } + target_epoch: Epoch, + ) -> Result, Error> { + let record = self + .get_attester_record(txn, validator_index, target_epoch)? + .ok_or_else(|| Error::MissingAttesterRecord { + validator_index, + target_epoch, + })?; + self.get_indexed_attestation(txn, record.indexed_attestation_hash) } pub fn get_attester_record( diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 375c16b13b9..a1886e5646d 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -23,10 +23,13 @@ pub use slasher_server::SlasherServer; use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing}; -// FIXME(sproul): rename #[derive(Debug, PartialEq)] -pub enum SlashingStatus { +pub enum AttesterSlashingStatus { NotSlashable, + /// A weird outcome that can occur when we go to lookup an attestation by its target + /// epoch for a surround slashing, but find a different attestation -- indicating that + /// the validator has already been caught double voting. + AlreadyDoubleVoted, DoubleVote(Box>), SurroundsExisting(Box>), SurroundedByExisting(Box>), @@ -38,21 +41,28 @@ pub enum ProposerSlashingStatus { DoubleVote(Box), } -impl SlashingStatus { +impl AttesterSlashingStatus { pub fn into_slashing( self, new_attestation: &IndexedAttestation, ) -> Option> { - use SlashingStatus::*; + use AttesterSlashingStatus::*; + // The surrounding attestation must be in `attestation_1` to be valid. match self { NotSlashable => None, - DoubleVote(existing) | SurroundsExisting(existing) | SurroundedByExisting(existing) => { - Some(AttesterSlashing { - attestation_1: *existing, - attestation_2: new_attestation.clone(), - }) + AlreadyDoubleVoted => { + // println!("Already double voted!"); + None } + DoubleVote(existing) | SurroundedByExisting(existing) => Some(AttesterSlashing { + attestation_1: *existing, + attestation_2: new_attestation.clone(), + }), + SurroundsExisting(existing) => Some(AttesterSlashing { + attestation_1: new_attestation.clone(), + attestation_2: *existing, + }), } } } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index e68555e2a50..c2265e8fe0e 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -113,7 +113,7 @@ impl Slasher { self.attestation_queue.requeue(deferred); // Insert attestations into database. 
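
On the ordering note in `into_slashing` above ("the surrounding attestation must be in `attestation_1`"): this mirrors the spec's `is_slashable_attestation_data`, where a surround slashing is only valid if `attestation_1` surrounds `attestation_2`. A rough sketch of the two predicates over bare `(source, target)` epoch pairs rather than full `IndexedAttestation`s:

    /// Double vote: same target epoch but different attestation data
    /// (approximated here as a different (source, target) pair).
    fn is_double_vote(a: (u64, u64), b: (u64, u64)) -> bool {
        a != b && a.1 == b.1
    }

    /// Surround vote: `a` surrounds `b`.
    fn is_surround_vote(a: (u64, u64), b: (u64, u64)) -> bool {
        a.0 < b.0 && b.1 < a.1
    }

    fn main() {
        let existing = (2, 3); // source => target
        let new = (1, 4);

        // `SurroundsExisting`: the new attestation is the surrounder, so it
        // must become `attestation_1` in the resulting `AttesterSlashing`.
        assert!(is_surround_vote(new, existing));

        // `SurroundedByExisting`: the stored attestation surrounds the new one
        // and therefore stays in `attestation_1`.
        assert!(is_surround_vote((0, 5), new));

        // Double votes are symmetric, so either order is acceptable.
        assert!(is_double_vote((1, 4), (2, 4)));
    }
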
- debug!( + info!( self.log, "Storing attestations in slasher DB"; "num_valid" => snapshot.len(), diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 256710fc94a..970642f7830 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -10,7 +10,7 @@ pub type E = MainnetEthSpec; pub fn logger() -> Logger { if cfg!(feature = "test_logger") { sloggers::terminal::TerminalLoggerBuilder::new() - .level(sloggers::types::Severity::Debug) + .level(sloggers::types::Severity::Trace) .build() .unwrap() } else { diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 532a42eea16..a4e2432d2c7 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -5,14 +5,34 @@ use slasher::{ Config, Slasher, }; use std::cmp::max; +use std::collections::HashSet; +use std::iter::FromIterator; use tempdir::TempDir; -use types::Epoch; +use types::{AttesterSlashing, Epoch, IndexedAttestation}; -fn random_test(seed: u64, check_slashings: bool) { - let num_validators = 4_usize; - let max_attestations = 50; +#[derive(Debug)] +struct TestConfig { + num_validators: usize, + max_attestations: usize, + check_slashings: bool, +} + +impl Default for TestConfig { + fn default() -> Self { + Self { + num_validators: 4, + max_attestations: 50, + check_slashings: false, + } + } +} + +fn random_test(seed: u64, test_config: TestConfig) { + let check_slashings = test_config.check_slashings; + let num_validators = test_config.num_validators; + let max_attestations = test_config.max_attestations; - eprintln!("Running with seed {}", seed); + println!("Running with seed {}", seed); let mut rng = StdRng::seed_from_u64(seed); let tempdir = TempDir::new("slasher").unwrap(); @@ -20,7 +40,7 @@ fn random_test(seed: u64, check_slashings: bool) { let mut config = Config::new(tempdir.path().into()); config.validator_chunk_size = 1 << rng.gen_range(1, 4); - println!("Validator chunk size: {}", config.validator_chunk_size); + eprintln!("Validator chunk size: {}", config.validator_chunk_size); let chunk_size_exponent = rng.gen_range(1, 4); config.chunk_size = 1 << chunk_size_exponent; @@ -49,7 +69,9 @@ fn random_test(seed: u64, check_slashings: bool) { // If checking slashings, generate valid attestations in range. 
let (source, target) = if check_slashings { let source = rng.gen_range( - current_epoch.as_u64() - config.history_length as u64 + 1, + current_epoch + .as_u64() + .saturating_sub(config.history_length as u64 - 1), current_epoch.as_u64() + 1, ); let target = rng.gen_range(source, current_epoch.as_u64() + 1); @@ -101,14 +123,62 @@ fn random_test(seed: u64, check_slashings: bool) { slasher.process_queued(current_epoch).unwrap(); - let _slashings = slasher.get_attester_slashings(); + let slashings = slasher.get_attester_slashings(); + + let slashed_validators = slashed_validators_from_slashings(&slashings); + let expected_slashed_validators = slashed_validators_from_attestations(&attestations); + assert_eq!(slashed_validators, expected_slashed_validators); +} + +fn hashset_intersection( + attestation_1_indices: &[u64], + attestation_2_indices: &[u64], +) -> HashSet { + &HashSet::from_iter(attestation_1_indices.iter().copied()) + & &HashSet::from_iter(attestation_2_indices.iter().copied()) +} + +fn slashed_validators_from_slashings(slashings: &[AttesterSlashing]) -> HashSet { + slashings + .iter() + .flat_map(|slashing| { + let att1 = &slashing.attestation_1; + let att2 = &slashing.attestation_2; + assert!( + att1.is_double_vote(att2) || att1.is_surround_vote(att2), + "invalid slashing: {:#?}", + slashing + ); + hashset_intersection(&att1.attesting_indices, &att2.attesting_indices) + }) + .collect() +} + +fn slashed_validators_from_attestations(attestations: &[IndexedAttestation]) -> HashSet { + let mut slashed_validators = HashSet::new(); + // O(n^2) code, watch out. + for att1 in attestations { + for att2 in attestations { + if att1 == att2 { + continue; + } + + if att1.is_double_vote(att2) || att1.is_surround_vote(att2) { + slashed_validators.extend(hashset_intersection( + &att1.attesting_indices, + &att2.attesting_indices, + )); + } + } + } + slashed_validators } #[test] fn no_crash() { let mut rng = thread_rng(); loop { - random_test(rng.gen(), false); + random_test(rng.gen(), TestConfig::default()); } } @@ -116,16 +186,46 @@ fn no_crash() { fn check_slashings() { let mut rng = thread_rng(); loop { - random_test(rng.gen(), true); + random_test( + rng.gen(), + TestConfig { + check_slashings: true, + ..TestConfig::default() + }, + ); } } #[test] -fn problem() { - random_test(2064946994010930548, false); +fn problema() { + random_test( + 17417858527589321514, + TestConfig { + check_slashings: true, + ..TestConfig::default() + }, + ); +} + +#[test] +fn slash_out_of_order() { + random_test( + 3534213164912297730, + TestConfig { + check_slashings: true, + max_attestations: 3, + ..TestConfig::default() + }, + ); } #[test] -fn problem2() { - random_test(10684284558065464334, false); +fn ooft() { + random_test( + 16346384169145986037, + TestConfig { + check_slashings: true, + ..TestConfig::default() + }, + ); } From b697dac015d80880d22dbae7eda0936cb2a24814 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Nov 2020 10:57:02 +1100 Subject: [PATCH 23/34] Resolve more fixmes --- Cargo.lock | 1 + consensus/types/src/beacon_block_header.rs | 4 +- consensus/types/src/proposer_slashing.rs | 4 +- .../types/src/signed_beacon_block_header.rs | 18 ++++- crypto/bls/src/generic_signature.rs | 2 +- slasher/Cargo.toml | 1 + slasher/src/array.rs | 17 +++-- slasher/src/database.rs | 45 +++++------ slasher/src/error.rs | 1 + slasher/src/slasher.rs | 18 ++--- slasher/tests/attester_slashings.rs | 74 +++++++------------ slasher/tests/random.rs | 4 +- 12 files changed, 91 insertions(+), 98 deletions(-) diff 
--git a/Cargo.lock b/Cargo.lock index 4223f68f2ea..72eb5c33677 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5334,6 +5334,7 @@ dependencies = [ "flate2", "lmdb", "lmdb-sys", + "maplit", "parking_lot 0.11.0", "rand 0.7.3", "rayon", diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 708c0e16fe7..82222b03589 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -11,7 +11,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct BeaconBlockHeader { pub slot: Slot, #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index 0055a04ce8a..ff12b0611a2 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -10,7 +10,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct ProposerSlashing { pub signed_header_1: SignedBeaconBlockHeader, pub signed_header_2: SignedBeaconBlockHeader, diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index 0a6411bae35..0e4f9fb0969 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -2,8 +2,11 @@ use crate::{ test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, SignedRoot, }; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; +use std::hash::{Hash, Hasher}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -11,12 +14,25 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derivative(PartialEq, Eq)] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, pub signature: Signature, } +/// Implementation of non-crypto-secure `Hash`, for use with `HashMap` and `HashSet`. +/// +/// Guarantees `header1 == header2 -> hash(header1) == hash(header2)`. +/// +/// Used in the operation pool. +impl Hash for SignedBeaconBlockHeader { + fn hash(&self, state: &mut H) { + self.message.hash(state); + self.signature.as_ssz_bytes().hash(state); + } +} + impl SignedBeaconBlockHeader { pub fn verify_signature( &self, diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 44250d4a6ba..a041d7c18ca 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -44,7 +44,7 @@ pub trait TSignature: Sized + Clone { /// /// Provides generic functionality whilst deferring all serious cryptographic operations to the /// generics. 
-#[derive(Clone, PartialEq)] +#[derive(Clone, PartialEq, Eq)] pub struct GenericSignature { /// The underlying point which performs *actual* cryptographic operations. point: Option, diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 7e18668c8f9..4dd2ee76401 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -30,6 +30,7 @@ types = { path = "../consensus/types" } [dev-dependencies] criterion = "0.3" +maplit = "1.0.2" rayon = "1.3.0" tempdir = "0.3.7" diff --git a/slasher/src/array.rs b/slasher/src/array.rs index a043359d4d5..6aaae90de94 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -2,9 +2,10 @@ use crate::{AttesterRecord, AttesterSlashingStatus, Config, Error, SlasherDB}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use lmdb::{RwTransaction, Transaction}; use serde_derive::{Deserialize, Serialize}; -use std::collections::{btree_map::Entry, BTreeMap}; +use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; use std::io::Read; +use std::iter::Extend; use std::sync::Arc; use types::{AttesterSlashing, Epoch, EthSpec, IndexedAttestation}; @@ -263,7 +264,6 @@ impl TargetArrayChunk for MinTargetChunk { Ok(epoch >= min_epoch) } - // FIXME(sproul): fix modulo behaviour fn first_start_epoch( source_epoch: Epoch, current_epoch: Epoch, @@ -477,7 +477,7 @@ pub fn update( batch: Vec, AttesterRecord)>>, current_epoch: Epoch, config: &Config, -) -> Result>, Error> { +) -> Result>, Error> { // Split the batch up into horizontal segments. // Map chunk indexes in the range `0..self.config.chunk_size` to attestations // for those chunks. @@ -507,10 +507,11 @@ pub fn update( )?); // Update all current epochs. + // FIXME(sproul): abstract for validator_index in validator_chunk_index * config.validator_chunk_size ..(validator_chunk_index + 1) * config.validator_chunk_size { - db.update_current_epoch(validator_index as u64, current_epoch, txn)?; + db.update_current_epoch_for_validator(validator_index as u64, current_epoch, txn)?; } Ok(slashings) @@ -526,7 +527,7 @@ pub fn epoch_update_for_validator( config: &Config, ) -> Result<(), Error> { let previous_current_epoch = - if let Some(epoch) = db.get_stored_current_epoch(validator_index, txn)? { + if let Some(epoch) = db.get_current_epoch_for_validator(validator_index, txn)? { epoch } else { return Ok(()); @@ -565,8 +566,8 @@ pub fn update_array( chunk_attestations: &BTreeMap, AttesterRecord)>>>, current_epoch: Epoch, config: &Config, -) -> Result>, Error> { - let mut slashings = vec![]; +) -> Result>, Error> { + let mut slashings = HashSet::new(); // Map from chunk index to updated chunk at that index. let mut updated_chunks = BTreeMap::new(); @@ -600,7 +601,7 @@ pub fn update_array( config, )?; if let Some(slashing) = slashing_status.into_slashing(&attestation.0) { - slashings.push(slashing); + slashings.insert(slashing); } } } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index f7bbaf87e6f..c005b690857 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -13,8 +13,7 @@ use types::{ }; /// Map from `(target_epoch, validator_index)` to `AttesterRecord`. -// FIXME(sproul): rename to `attesters_DB` -const ATTESTER_DB: &str = "attesters"; +const ATTESTERS_DB: &str = "attesters"; /// Map from `indexed_attestation_hash` to `IndexedAttestation`. 
const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; const MIN_TARGETS_DB: &str = "min_targets"; @@ -147,7 +146,7 @@ impl SlasherDB { .open(&config.database_path)?; let indexed_attestation_db = env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; - let attesters_db = env.create_db(Some(ATTESTER_DB), Self::db_flags())?; + let attesters_db = env.create_db(Some(ATTESTERS_DB), Self::db_flags())?; let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; let current_epochs_db = env.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; @@ -179,8 +178,7 @@ impl SlasherDB { Ok(self.env.begin_rw_txn()?) } - // FIXME(sproul): rename - pub fn get_stored_current_epoch( + pub fn get_current_epoch_for_validator( &self, validator_index: u64, txn: &mut RwTransaction<'_>, @@ -195,7 +193,7 @@ impl SlasherDB { .transpose()?) } - pub fn update_current_epoch( + pub fn update_current_epoch_for_validator( &self, validator_index: u64, current_epoch: Epoch, @@ -266,8 +264,7 @@ impl SlasherDB { existing_attestation, ))) } else { - // FIXME(sproul): this could be an Err - Ok(AttesterSlashingStatus::NotSlashable) + Err(Error::AttesterRecordInconsistentRoot) } } // If no attestation exists, insert a record for this validator. @@ -412,10 +409,12 @@ impl SlasherDB { let mut cursor = txn.open_rw_cursor(self.attesters_db)?; // Position cursor at first key, bailing out if the database is empty. - match cursor.get(None, None, lmdb_sys::MDB_FIRST) { - Ok(_) => (), - Err(lmdb::Error::NotFound) => return Ok(()), - Err(e) => return Err(e.into()), + if cursor + .get(None, None, lmdb_sys::MDB_FIRST) + .optional()? + .is_none() + { + return Ok(()); } let mut indexed_attestations_to_delete = HashSet::new(); @@ -433,11 +432,13 @@ impl SlasherDB { cursor.del(Self::write_flags())?; - // FIXME(sproul): abstract this pattern - match cursor.get(None, None, lmdb_sys::MDB_NEXT) { - Ok(_) => (), - Err(lmdb::Error::NotFound) => break, - Err(e) => return Err(e.into()), + // End the loop if there is no next entry. + if cursor + .get(None, None, lmdb_sys::MDB_NEXT) + .optional()? 
+ .is_none() + { + break; } } else { break; @@ -452,13 +453,3 @@ impl SlasherDB { Ok(()) } } - -// FIXME(sproul): consider using this to avoid allocations -#[allow(unused)] -fn hash256_from_slice(data: &[u8]) -> Result { - if data.len() == 32 { - Ok(Hash256::from_slice(data)) - } else { - Err(Error::AttesterRecordCorrupt { length: data.len() }) - } -} diff --git a/slasher/src/error.rs b/slasher/src/error.rs index 352ea668e26..e123b571e91 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -34,6 +34,7 @@ pub enum Error { }, MissingAttesterKey, MissingProposerKey, + AttesterRecordInconsistentRoot, } impl From for Error { diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index c2265e8fe0e..fc7cbab106b 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -5,6 +5,7 @@ use crate::{ use lmdb::{RwTransaction, Transaction}; use parking_lot::Mutex; use slog::{debug, error, info, Logger}; +use std::collections::HashSet; use std::sync::Arc; use types::{ AttesterSlashing, Epoch, EthSpec, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, @@ -15,9 +16,8 @@ pub struct Slasher { db: SlasherDB, pub(crate) attestation_queue: AttestationQueue, pub(crate) block_queue: BlockQueue, - // TODO: consider using a set - attester_slashings: Mutex>>, - proposer_slashings: Mutex>, + attester_slashings: Mutex>>, + proposer_slashings: Mutex>, // TODO: consider removing Arc config: Arc, pub(crate) log: Logger, @@ -28,8 +28,8 @@ impl Slasher { config.validate()?; let config = Arc::new(config); let db = SlasherDB::open(config.clone())?; - let attester_slashings = Mutex::new(vec![]); - let proposer_slashings = Mutex::new(vec![]); + let attester_slashings = Mutex::new(HashSet::new()); + let proposer_slashings = Mutex::new(HashSet::new()); let attestation_queue = AttestationQueue::new(); let block_queue = BlockQueue::new(); Ok(Self { @@ -43,12 +43,12 @@ impl Slasher { }) } - pub fn get_attester_slashings(&self) -> Vec> { - std::mem::replace(&mut self.attester_slashings.lock(), vec![]) + pub fn get_attester_slashings(&self) -> HashSet> { + std::mem::replace(&mut self.attester_slashings.lock(), HashSet::new()) } - pub fn get_proposer_slashings(&self) -> Vec { - std::mem::replace(&mut self.proposer_slashings.lock(), vec![]) + pub fn get_proposer_slashings(&self) -> HashSet { + std::mem::replace(&mut self.proposer_slashings.lock(), HashSet::new()) } pub fn config(&self) -> &Config { diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index 185b36b3f5a..d95c9f1e26d 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -1,9 +1,11 @@ +use maplit::hashset; use rayon::prelude::*; use slasher::{ config::DEFAULT_CHUNK_SIZE, test_utils::{att_slashing, indexed_att, logger, E}, Config, Slasher, }; +use std::collections::HashSet; use tempdir::TempDir; use types::{AttesterSlashing, Epoch, IndexedAttestation}; @@ -12,7 +14,7 @@ fn double_vote_single_val() { let v = vec![99]; let att1 = indexed_att(&v, 0, 1, 0); let att2 = indexed_att(&v, 0, 1, 1); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 1); slasher_test_indiv(&attestations, &slashings, 1000); @@ -23,7 +25,7 @@ fn double_vote_multi_vals() { let v = vec![0, 1, 2]; let att1 = indexed_att(&v, 0, 1, 0); let att2 = indexed_att(&v, 0, 1, 1); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = 
hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 1); slasher_test_indiv(&attestations, &slashings, 1000); @@ -36,7 +38,7 @@ fn double_vote_some_vals() { let v2 = vec![0, 2, 4, 6]; let att1 = indexed_att(&v1, 0, 1, 0); let att2 = indexed_att(&v2, 0, 1, 1); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 1); slasher_test_indiv(&attestations, &slashings, 1000); @@ -51,7 +53,7 @@ fn double_vote_some_vals_repeat() { let att1 = indexed_att(&v1, 0, 1, 0); let att2 = indexed_att(&v2, 0, 1, 1); let att3 = indexed_att(&v3, 0, 1, 0); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2, att3]; slasher_test_indiv(&attestations, &slashings, 1); slasher_test_indiv(&attestations, &slashings, 1000); @@ -65,8 +67,8 @@ fn no_double_vote_same_target() { let att1 = indexed_att(&v1, 0, 1, 0); let att2 = indexed_att(&v2, 0, 1, 0); let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &[], 1); - slasher_test_indiv(&attestations, &[], 1000); + slasher_test_indiv(&attestations, &hashset! {}, 1); + slasher_test_indiv(&attestations, &hashset! {}, 1000); } // Two groups votes for different things, no slashings. @@ -77,8 +79,8 @@ fn no_double_vote_distinct_vals() { let att1 = indexed_att(&v1, 0, 1, 0); let att2 = indexed_att(&v2, 0, 1, 1); let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &[], 1); - slasher_test_indiv(&attestations, &[], 1000); + slasher_test_indiv(&attestations, &hashset! {}, 1); + slasher_test_indiv(&attestations, &hashset! {}, 1000); } #[test] @@ -87,8 +89,8 @@ fn no_double_vote_repeated() { let att1 = indexed_att(&v, 0, 1, 0); let att2 = att1.clone(); let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &[], 1); - slasher_test_batch(&attestations, &[], 1); + slasher_test_indiv(&attestations, &hashset! {}, 1); + slasher_test_batch(&attestations, &hashset! 
{}, 1); parallel_slasher_test(&attestations, vec![], 1); } @@ -97,47 +99,36 @@ fn surrounds_existing_single_val_single_chunk() { let v = vec![0]; let att1 = indexed_att(&v, 1, 2, 0); let att2 = indexed_att(&v, 0, 3, 0); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att2, &att1)]; slasher_test_indiv(&[att1, att2], &slashings, 3); } -/* FIXME: refactor these tests #[test] fn surrounds_existing_multi_vals_single_chunk() { - let v = vec![0]; - let att1 = indexed_att(&v, 1, 2, 0); - let att2 = indexed_att(&v, 0, 3, 0); - let slashings = vec![att_slashing(&att1, &att2)]; - slasher_test_indiv(&[att1, att2], &slashings, 3); let validators = vec![0, 16, 1024, 300_000, 300_001]; let att1 = indexed_att(validators.clone(), 1, 2, 0); let att2 = indexed_att(validators.clone(), 0, 3, 0); - - slasher.accept_attestation(att1); - slasher.process_attestations(); - slasher.accept_attestation(att2); - slasher.process_attestations(); + let slashings = hashset![att_slashing(&att2, &att1)]; + slasher_test_indiv(&[att1, att2], &slashings, 3); } - #[test] fn surrounds_existing_many_chunks() { let v = vec![0]; - let chunk_size = Config::default().chunk_size as u64; + let chunk_size = DEFAULT_CHUNK_SIZE as u64; let att1 = indexed_att(&v, 3 * chunk_size, 3 * chunk_size + 1, 0); let att2 = indexed_att(&v, 0, 3 * chunk_size + 2, 0); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att2, &att1)]; let attestations = vec![att1, att2]; - slasher_test(&attestations, &slashings, 4 * chunk_size, |_| true); + slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); } -*/ #[test] fn surrounded_by_single_val_single_chunk() { let v = vec![0]; let att1 = indexed_att(&v, 0, 15, 0); let att2 = indexed_att(&v, 1, 14, 0); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 15); } @@ -148,27 +139,16 @@ fn surrounded_by_single_val_multi_chunk() { let chunk_size = DEFAULT_CHUNK_SIZE as u64; let att1 = indexed_att(&v, 0, 3 * chunk_size, 0); let att2 = indexed_att(&v, chunk_size, chunk_size + 1, 0); - let slashings = vec![att_slashing(&att1, &att2)]; + let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 3 * chunk_size); slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); } -/* -fn slasher_tests(attestations: &[IndexedAttestation], expected: &[AttesterSlashing]) { - // Process after every attestation. - // slasher_test(attestations, expected, |_| true); - // Process only at the end. - slasher_test(attestations, expected, |_| false); - // Process every second attestation. - // slasher_test(attestations, expected, |i| i % 2 == 0); -} -*/ - // Process each attestation individually, and confirm that the slashings produced are as expected. fn slasher_test_indiv( attestations: &[IndexedAttestation], - expected: &[AttesterSlashing], + expected: &HashSet>, current_epoch: u64, ) { slasher_test(attestations, expected, current_epoch, |_| true); @@ -177,7 +157,7 @@ fn slasher_test_indiv( // Process all attestations in one batch. 
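Editor's note: the reordering to `att_slashing(&att2, &att1)` in the surround tests above reflects the rule (also stated in `lib.rs` below) that the surrounding attestation must be `attestation_1` for the resulting `AttesterSlashing` to be valid on chain. A tiny standalone sketch of the slashability predicates, using simplified source/target pairs rather than the real `IndexedAttestation` methods:

```rust
// Simplified stand-in for attestation data: just a source and target epoch.
#[derive(Debug, Clone, Copy, PartialEq)]
struct Vote {
    source: u64,
    target: u64,
}

// `a` double-votes with `b` if they differ but vote for the same target epoch.
fn is_double_vote(a: Vote, b: Vote) -> bool {
    a != b && a.target == b.target
}

// `a` surrounds `b` if a.source < b.source and b.target < a.target.
fn is_surround_vote(a: Vote, b: Vote) -> bool {
    a.source < b.source && b.target < a.target
}

fn main() {
    // Mirrors `surrounds_existing_single_val_single_chunk`: att1 = 1=>2, att2 = 0=>3.
    let att1 = Vote { source: 1, target: 2 };
    let att2 = Vote { source: 0, target: 3 };

    assert!(is_surround_vote(att2, att1)); // att2 surrounds att1 ...
    assert!(!is_surround_vote(att1, att2)); // ... not the other way around,
    assert!(!is_double_vote(att1, att2)); // and this pair is not a double vote.

    // So the valid slashing pairs them as (attestation_1 = att2, attestation_2 = att1),
    // which is why the expected value is `att_slashing(&att2, &att1)`.
}
```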
fn slasher_test_batch( attestations: &[IndexedAttestation], - expected: &[AttesterSlashing], + expected: &HashSet>, current_epoch: u64, ) { slasher_test(attestations, expected, current_epoch, |_| false); @@ -185,7 +165,7 @@ fn slasher_test_batch( fn slasher_test( attestations: &[IndexedAttestation], - expected: &[AttesterSlashing], + expected: &HashSet>, current_epoch: u64, should_process_after: impl Fn(usize) -> bool, ) { @@ -205,11 +185,7 @@ fn slasher_test( let slashings = slasher.get_attester_slashings(); - for (i, slashing) in expected.iter().enumerate() { - assert_eq!(*slashing, slashings[i], "slashing {} should match", i); - } - - assert_eq!(expected, &slashings[..]); + assert_eq!(&slashings, expected); // Pruning should not error. slasher.prune_database(current_epoch).unwrap(); @@ -217,7 +193,7 @@ fn slasher_test( fn parallel_slasher_test( attestations: &[IndexedAttestation], - // TODO(sproul): check slashed validators + // FIXME(sproul): check slashed validators _slashed_validators: Vec, current_epoch: u64, ) { diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index a4e2432d2c7..4ba7d4b171f 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -138,7 +138,7 @@ fn hashset_intersection( & &HashSet::from_iter(attestation_2_indices.iter().copied()) } -fn slashed_validators_from_slashings(slashings: &[AttesterSlashing]) -> HashSet { +fn slashed_validators_from_slashings(slashings: &HashSet>) -> HashSet { slashings .iter() .flat_map(|slashing| { @@ -175,6 +175,7 @@ fn slashed_validators_from_attestations(attestations: &[IndexedAttestation]) } #[test] +#[ignore] fn no_crash() { let mut rng = thread_rng(); loop { @@ -183,6 +184,7 @@ fn no_crash() { } #[test] +#[ignore] fn check_slashings() { let mut rng = thread_rng(); loop { From 2eac9547e4d531c944c1901a9530f35c4daf53cc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Nov 2020 12:16:55 +1100 Subject: [PATCH 24/34] Address almost all remaining FIXMEs --- slasher/src/array.rs | 18 +++---- slasher/src/config.rs | 12 ++++- slasher/src/database.rs | 3 +- slasher/src/lib.rs | 5 +- slasher/src/slasher.rs | 27 +++------- slasher/src/slasher_server.rs | 15 +++++- slasher/src/test_utils.rs | 48 +++++++++++++++++ slasher/tests/attester_slashings.rs | 11 ++-- slasher/tests/random.rs | 83 ++++++++++------------------- slasher/tests/wrap_around.rs | 8 --- 10 files changed, 124 insertions(+), 106 deletions(-) diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 6aaae90de94..5fda1c79dd3 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -30,7 +30,6 @@ pub struct Chunk { } impl Chunk { - // TODO: write tests for epochs greater than length pub fn get_target( &self, validator_index: u64, @@ -430,7 +429,6 @@ pub fn apply_attestation_for_validator( let slashing_status = current_chunk.check_slashable(db, txn, validator_index, attestation, config)?; - // TODO: consider removing this early return and updating the array if slashing_status != AttesterSlashingStatus::NotSlashable { return Ok(slashing_status); } @@ -507,11 +505,8 @@ pub fn update( )?); // Update all current epochs. 
- // FIXME(sproul): abstract - for validator_index in validator_chunk_index * config.validator_chunk_size - ..(validator_chunk_index + 1) * config.validator_chunk_size - { - db.update_current_epoch_for_validator(validator_index as u64, current_epoch, txn)?; + for validator_index in config.validator_indices_in_chunk(validator_chunk_index) { + db.update_current_epoch_for_validator(validator_index, current_epoch, txn)?; } Ok(slashings) @@ -571,15 +566,14 @@ pub fn update_array( // Map from chunk index to updated chunk at that index. let mut updated_chunks = BTreeMap::new(); - for validator_index in validator_chunk_index * config.validator_chunk_size - ..(validator_chunk_index + 1) * config.validator_chunk_size - { + // Update the arrays for the change of current epoch. + for validator_index in config.validator_indices_in_chunk(validator_chunk_index) { epoch_update_for_validator( db, txn, &mut updated_chunks, validator_chunk_index, - validator_index as u64, + validator_index, current_epoch, config, )?; @@ -588,7 +582,7 @@ pub fn update_array( for attestations in chunk_attestations.values() { for attestation in attestations { for validator_index in - config.attesting_validators_for_chunk(&attestation.0, validator_chunk_index) + config.attesting_validators_in_chunk(&attestation.0, validator_chunk_index) { let slashing_status = apply_attestation_for_validator::( db, diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 5f59dd46f3b..2f6e463b3b8 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -68,8 +68,18 @@ impl Config { validator_offset * self.chunk_size + chunk_offset } + /// Return an iterator over all the validator indices in a validator chunk. + pub fn validator_indices_in_chunk( + &self, + validator_chunk_index: usize, + ) -> impl Iterator { + (validator_chunk_index * self.validator_chunk_size + ..(validator_chunk_index + 1) * self.validator_chunk_size) + .map(|index| index as u64) + } + /// Iterate over the attesting indices which belong to the `validator_chunk_index` chunk. - pub fn attesting_validators_for_chunk<'a, E: EthSpec>( + pub fn attesting_validators_in_chunk<'a, E: EthSpec>( &'a self, attestation: &'a IndexedAttestation, validator_chunk_index: usize, diff --git a/slasher/src/database.rs b/slasher/src/database.rs index c005b690857..a46a9b78cec 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -138,12 +138,11 @@ impl AsRef<[u8]> for CurrentEpochKey { impl SlasherDB { pub fn open(config: Arc) -> Result { - // TODO: open_with_permissions std::fs::create_dir_all(&config.database_path)?; let env = Environment::new() .set_max_dbs(LMDB_MAX_DBS) .set_map_size(LMDB_MAP_SIZE) - .open(&config.database_path)?; + .open_with_permissions(&config.database_path, 0o600)?; let indexed_attestation_db = env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; let attesters_db = env.create_db(Some(ATTESTERS_DB), Self::db_flags())?; diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index a1886e5646d..50bb328b6b6 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -51,10 +51,7 @@ impl AttesterSlashingStatus { // The surrounding attestation must be in `attestation_1` to be valid. 
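Editor's note: the new `Config::validator_indices_in_chunk` helper above replaces the open-coded range, and sits alongside the existing chunk arithmetic (`chunk_index`, the on-disk offset, `attesting_validators_in_chunk`). A standalone mirror of that arithmetic with concrete numbers, to make the bucketing explicit; the struct, the field values and the `validator_chunk_index` helper here are illustrative, not the real `Config`:

```rust
// Standalone mirror of the slasher's chunk bookkeeping.
struct MiniConfig {
    chunk_size: usize,           // epochs per chunk on disk
    validator_chunk_size: usize, // validators per chunk on disk
    history_length: usize,       // epochs of history kept (a multiple of chunk_size)
}

impl MiniConfig {
    // Epochs wrap modulo the history length before being bucketed into chunks.
    fn chunk_index(&self, epoch: usize) -> usize {
        (epoch % self.history_length) / self.chunk_size
    }

    fn validator_chunk_index(&self, validator_index: usize) -> usize {
        validator_index / self.validator_chunk_size
    }

    fn validator_indices_in_chunk(&self, validator_chunk_index: usize) -> std::ops::Range<usize> {
        validator_chunk_index * self.validator_chunk_size
            ..(validator_chunk_index + 1) * self.validator_chunk_size
    }
}

fn main() {
    let config = MiniConfig {
        chunk_size: 16,
        validator_chunk_size: 256,
        history_length: 4096,
    };

    // Epoch 12345 lands in chunk (12345 % 4096) / 16 = 57 / 16 = 3.
    assert_eq!(config.chunk_index(12345), 3);

    // Validator chunk 2 covers validator indices 512..768, and validator 600 maps back to it.
    assert_eq!(config.validator_indices_in_chunk(2), 512..768);
    assert_eq!(config.validator_chunk_index(600), 2);
}
```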
match self { NotSlashable => None, - AlreadyDoubleVoted => { - // println!("Already double voted!"); - None - } + AlreadyDoubleVoted => None, DoubleVote(existing) | SurroundedByExisting(existing) => Some(AttesterSlashing { attestation_1: *existing, attestation_2: new_attestation.clone(), diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index fc7cbab106b..1994aa6d0bc 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -18,7 +18,6 @@ pub struct Slasher { pub(crate) block_queue: BlockQueue, attester_slashings: Mutex>>, proposer_slashings: Mutex>, - // TODO: consider removing Arc config: Arc, pub(crate) log: Logger, } @@ -113,21 +112,13 @@ impl Slasher { self.attestation_queue.requeue(deferred); // Insert attestations into database. - info!( + debug!( self.log, "Storing attestations in slasher DB"; "num_valid" => snapshot.len(), "num_deferred" => num_deferred, "num_dropped" => num_dropped, ); - /* - eprintln!( - "valid: {}, deferred: {}, dropped: {}", - snapshot.len(), - num_deferred, - num_dropped - ); - */ for attestation in snapshot.attestations.iter() { self.db.store_indexed_attestation( txn, @@ -215,12 +206,12 @@ impl Slasher { subqueue_id: usize, attestation: &IndexedAttestation, attester_record: AttesterRecord, - ) -> Result>, Error> { - let mut slashings = vec![]; + ) -> Result>, Error> { + let mut slashings = HashSet::new(); for validator_index in self .config - .attesting_validators_for_chunk(attestation, subqueue_id) + .attesting_validators_in_chunk(attestation, subqueue_id) { let slashing_status = self.db.check_and_update_attester_record( txn, @@ -236,12 +227,7 @@ impl Slasher { "validator_index" => validator_index, "epoch" => slashing.attestation_1.data.target.epoch, ); - - // Avoid creating duplicate slashings for the same attestation. - // PERF: this is O(n) instead of O(1), but n should be small. - if !slashings.contains(&slashing) { - slashings.push(slashing); - } + slashings.insert(slashing); } } @@ -292,8 +278,9 @@ impl Slasher { ) } + /// Prune unnecessary attestations and blocks from the on-disk database. + /// /// Must only be called after `process_queued(current_epoch)`. 
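Editor's note: `prune_database` is documented above as only being valid after `process_queued(current_epoch)`; the server loop later in this patch and the wrap-around test both follow that order. A short usage sketch (the wrapper function name is illustrative; the `Slasher` methods are the ones exercised in the tests):

```rust
use slasher::Slasher;
use types::{Epoch, EthSpec, IndexedAttestation};

// Illustrative helper showing the intended call order: queue, process, then prune.
fn ingest_and_prune<E: EthSpec>(
    slasher: &Slasher<E>,
    attestations: Vec<IndexedAttestation<E>>,
    current_epoch: Epoch,
) {
    for attestation in attestations {
        slasher.accept_attestation(attestation);
    }
    // Batch-process everything queued, relative to `current_epoch`...
    slasher.process_queued(current_epoch).unwrap();
    // ...and only then prune data that has fallen out of the history window.
    slasher.prune_database(current_epoch).unwrap();
}
```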
- // FIXME(sproul): consider checking this condition pub fn prune_database(&self, current_epoch: Epoch) -> Result<(), Error> { self.db.prune(current_epoch) } diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index ebf431bdd09..673b01f5d10 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -27,7 +27,8 @@ impl SlasherServer { executor.spawn( async move { - // FIXME(sproul): read slot time from config, align to some fraction of each slot + // NOTE: could align each run to some fixed point in each slot, see: + // https://github.com/sigp/lighthouse/issues/1861 let slot_clock = Arc::new(slot_clock); let mut interval = interval_at(Instant::now(), Duration::from_secs(update_period)); while interval.next().await.is_some() { @@ -54,12 +55,24 @@ impl SlasherServer { error!( slasher.log, "Error during scheduled slasher processing"; + "epoch" => current_epoch, "error" => format!("{:?}", e) ); + continue; + } + if let Err(e) = slasher.prune_database(current_epoch) { + error!( + slasher.log, + "Error during slasher database pruning"; + "epoch" => current_epoch, + "error" => format!("{:?}", e), + ); + continue; } debug!( slasher.log, "Completed slasher update"; + "epoch" => current_epoch, "time_taken" => format!("{}ms", t.elapsed().as_millis()), "num_attestations" => num_attestations, "num_blocks" => num_blocks, diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 970642f7830..e69105d5bd0 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -1,5 +1,7 @@ use slog::Logger; use sloggers::Build; +use std::collections::HashSet; +use std::iter::FromIterator; use types::{ AggregateSignature, AttestationData, AttesterSlashing, Checkpoint, Epoch, Hash256, IndexedAttestation, MainnetEthSpec, Slot, @@ -52,3 +54,49 @@ pub fn att_slashing( attestation_2: attestation_2.clone(), } } + +pub fn hashset_intersection( + attestation_1_indices: &[u64], + attestation_2_indices: &[u64], +) -> HashSet { + &HashSet::from_iter(attestation_1_indices.iter().copied()) + & &HashSet::from_iter(attestation_2_indices.iter().copied()) +} + +pub fn slashed_validators_from_slashings(slashings: &HashSet>) -> HashSet { + slashings + .iter() + .flat_map(|slashing| { + let att1 = &slashing.attestation_1; + let att2 = &slashing.attestation_2; + assert!( + att1.is_double_vote(att2) || att1.is_surround_vote(att2), + "invalid slashing: {:#?}", + slashing + ); + hashset_intersection(&att1.attesting_indices, &att2.attesting_indices) + }) + .collect() +} + +pub fn slashed_validators_from_attestations( + attestations: &[IndexedAttestation], +) -> HashSet { + let mut slashed_validators = HashSet::new(); + // O(n^2) code, watch out. 
+ for att1 in attestations { + for att2 in attestations { + if att1 == att2 { + continue; + } + + if att1.is_double_vote(att2) || att1.is_surround_vote(att2) { + slashed_validators.extend(hashset_intersection( + &att1.attesting_indices, + &att2.attesting_indices, + )); + } + } + } + slashed_validators +} diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index d95c9f1e26d..a0a26a96d36 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -2,7 +2,7 @@ use maplit::hashset; use rayon::prelude::*; use slasher::{ config::DEFAULT_CHUNK_SIZE, - test_utils::{att_slashing, indexed_att, logger, E}, + test_utils::{att_slashing, indexed_att, logger, slashed_validators_from_slashings, E}, Config, Slasher, }; use std::collections::HashSet; @@ -91,7 +91,7 @@ fn no_double_vote_repeated() { let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &hashset! {}, 1); slasher_test_batch(&attestations, &hashset! {}, 1); - parallel_slasher_test(&attestations, vec![], 1); + parallel_slasher_test(&attestations, hashset! {}, 1); } #[test] @@ -193,8 +193,7 @@ fn slasher_test( fn parallel_slasher_test( attestations: &[IndexedAttestation], - // FIXME(sproul): check slashed validators - _slashed_validators: Vec, + expected_slashed_validators: HashSet, current_epoch: u64, ) { let tempdir = TempDir::new("slasher").unwrap(); @@ -209,4 +208,8 @@ fn parallel_slasher_test( slasher.process_queued(current_epoch) }) .expect("parallel processing shouldn't race"); + + let slashings = slasher.get_attester_slashings(); + let slashed_validators = slashed_validators_from_slashings(&slashings); + assert_eq!(slashed_validators, expected_slashed_validators); } diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 4ba7d4b171f..e0b2bd58571 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,14 +1,15 @@ use rand::prelude::*; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; use slasher::{ - test_utils::{indexed_att, logger, E}, + test_utils::{ + indexed_att, logger, slashed_validators_from_attestations, + slashed_validators_from_slashings, E, + }, Config, Slasher, }; use std::cmp::max; -use std::collections::HashSet; -use std::iter::FromIterator; use tempdir::TempDir; -use types::{AttesterSlashing, Epoch, IndexedAttestation}; +use types::Epoch; #[derive(Debug)] struct TestConfig { @@ -130,50 +131,7 @@ fn random_test(seed: u64, test_config: TestConfig) { assert_eq!(slashed_validators, expected_slashed_validators); } -fn hashset_intersection( - attestation_1_indices: &[u64], - attestation_2_indices: &[u64], -) -> HashSet { - &HashSet::from_iter(attestation_1_indices.iter().copied()) - & &HashSet::from_iter(attestation_2_indices.iter().copied()) -} - -fn slashed_validators_from_slashings(slashings: &HashSet>) -> HashSet { - slashings - .iter() - .flat_map(|slashing| { - let att1 = &slashing.attestation_1; - let att2 = &slashing.attestation_2; - assert!( - att1.is_double_vote(att2) || att1.is_surround_vote(att2), - "invalid slashing: {:#?}", - slashing - ); - hashset_intersection(&att1.attesting_indices, &att2.attesting_indices) - }) - .collect() -} - -fn slashed_validators_from_attestations(attestations: &[IndexedAttestation]) -> HashSet { - let mut slashed_validators = HashSet::new(); - // O(n^2) code, watch out. 
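Editor's note: `hashset_intersection` above is what turns a slashable pair of attestations into the set of validators actually slashed, since only indices present in both attestations count. A small usage example mirroring the helper:

```rust
use std::collections::HashSet;
use std::iter::FromIterator;

// Same shape as the test-utils helper: intersect the two attesting-indices lists.
fn hashset_intersection(a: &[u64], b: &[u64]) -> HashSet<u64> {
    &HashSet::from_iter(a.iter().copied()) & &HashSet::from_iter(b.iter().copied())
}

fn main() {
    let att1_indices: &[u64] = &[0, 2, 4, 6];
    let att2_indices: &[u64] = &[1, 2, 3, 4];

    // Only validators 2 and 4 attested in both, so only they are slashable by this pair.
    let slashed = hashset_intersection(att1_indices, att2_indices);
    assert_eq!(slashed, HashSet::from_iter(vec![2u64, 4]));
}
```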
- for att1 in attestations { - for att2 in attestations { - if att1 == att2 { - continue; - } - - if att1.is_double_vote(att2) || att1.is_surround_vote(att2) { - slashed_validators.extend(hashset_intersection( - &att1.attesting_indices, - &att2.attesting_indices, - )); - } - } - } - slashed_validators -} - +// Fuzz-like test that runs forever on different seeds looking for crashes. #[test] #[ignore] fn no_crash() { @@ -183,6 +141,7 @@ fn no_crash() { } } +// Fuzz-like test that runs forever on different seeds looking for missed slashings. #[test] #[ignore] fn check_slashings() { @@ -199,9 +158,9 @@ fn check_slashings() { } #[test] -fn problema() { +fn check_slashings_example1() { random_test( - 17417858527589321514, + 1, TestConfig { check_slashings: true, ..TestConfig::default() @@ -210,9 +169,9 @@ fn problema() { } #[test] -fn slash_out_of_order() { +fn check_slashings_example2() { random_test( - 3534213164912297730, + 2, TestConfig { check_slashings: true, max_attestations: 3, @@ -222,12 +181,28 @@ fn slash_out_of_order() { } #[test] -fn ooft() { +fn check_slashings_example3() { random_test( - 16346384169145986037, + 3, TestConfig { check_slashings: true, + max_attestations: 100, ..TestConfig::default() }, ); } + +#[test] +fn no_crash_example1() { + random_test(1, TestConfig::default()); +} + +#[test] +fn no_crash_example2() { + random_test(2, TestConfig::default()); +} + +#[test] +fn no_crash_example3() { + random_test(3, TestConfig::default()); +} diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index ed49f0303a1..0ed3860dcfb 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -5,13 +5,6 @@ use slasher::{ use tempdir::TempDir; use types::Epoch; -/* -#[test] -fn attestation_pruning_basic() { - unimplemented!() -} -*/ - #[test] fn attestation_pruning_empty_wrap_around() { let tempdir = TempDir::new("slasher").unwrap(); @@ -27,7 +20,6 @@ fn attestation_pruning_empty_wrap_around() { let mut current_epoch = Epoch::new(history_length - 1); - // FIXME(sproul): add bounds check that attestation isn't wider than history length slasher.accept_attestation(indexed_att(v.clone(), 0, history_length - 1, 0)); slasher.process_queued(current_epoch).unwrap(); slasher.prune_database(current_epoch).unwrap(); From 4bbaf7ea78e05d0daf43923f3196e762c6f5647f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Nov 2020 14:28:06 +1100 Subject: [PATCH 25/34] Remove printlns --- slasher/tests/random.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index e0b2bd58571..870e674921c 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -41,15 +41,10 @@ fn random_test(seed: u64, test_config: TestConfig) { let mut config = Config::new(tempdir.path().into()); config.validator_chunk_size = 1 << rng.gen_range(1, 4); - eprintln!("Validator chunk size: {}", config.validator_chunk_size); - let chunk_size_exponent = rng.gen_range(1, 4); config.chunk_size = 1 << chunk_size_exponent; config.history_length = 1 << rng.gen_range(chunk_size_exponent, chunk_size_exponent + 3); - eprintln!("Chunk size: {}", config.chunk_size); - eprintln!("History length: {}", config.history_length); - let slasher = Slasher::::open(config.clone(), logger()).unwrap(); let validators = (0..num_validators as u64).collect::>(); @@ -85,11 +80,6 @@ fn random_test(seed: u64, test_config: TestConfig) { let target_root = rng.gen_range(0, 3); let attestation = indexed_att(&attesting_indices, source, target, target_root); 
- eprintln!( - "Attestation {}=>{} from {:?} for root {}", - source, target, attesting_indices, target_root - ); - if check_slashings { attestations.push(attestation.clone()); } @@ -99,12 +89,10 @@ fn random_test(seed: u64, test_config: TestConfig) { // Maybe process if rng.gen_bool(0.1) { - eprintln!("Processing {}", current_epoch); slasher.process_queued(current_epoch).unwrap(); // Maybe prune if rng.gen_bool(0.1) { - eprintln!("Pruning at epoch {}", current_epoch); slasher.prune_database(current_epoch).unwrap(); } } From 188900ec86fbc118db20d6e5e4d4b74c0a8e446d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Nov 2020 20:15:59 +1100 Subject: [PATCH 26/34] Tidy up some more, add CLI config --- .../src/attestation_verification.rs | 2 - beacon_node/beacon_chain/src/beacon_chain.rs | 1 - .../beacon_chain/src/block_verification.rs | 62 ++++++------ beacon_node/client/src/builder.rs | 11 ++- beacon_node/src/cli.rs | 21 ++++ beacon_node/src/config.rs | 6 ++ beacon_node/src/lib.rs | 6 -- slasher/src/config.rs | 10 ++ slasher/src/database.rs | 95 ++++++++++++++++--- slasher/src/error.rs | 9 ++ 10 files changed, 170 insertions(+), 53 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index bd5725131df..739c33a3584 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -364,7 +364,6 @@ fn process_slash_info( ) .map_err(SlasherVerificationError::SignatureError) }) { - // FIXME(sproul): differentiate error from invalid sig. debug!( chain.log, "Signature verification for slasher failed"; @@ -511,7 +510,6 @@ impl VerifiedAggregatedAttestation { Ok(()) } - // TODO(sproul): naming pub fn verify_slashable( signed_aggregate: SignedAggregateAndProof, chain: &BeaconChain, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b3aaf488ac6..f44018654d2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1580,7 +1580,6 @@ impl BeaconChain { metrics::stop_timer(attestation_observation_timer); - // FIXME(sproul): add timer if let Some(slasher) = self.slasher.as_ref() { for attestation in &signed_block.message.body.attestations { let committee = diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index b2f5f3bc857..04b6efef345 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -386,36 +386,6 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { pub confirmation_db_batch: Vec>, } -// FIXME(sproul): put this on the beacon chain? -fn verify_header_signature( - chain: &BeaconChain, - header: &SignedBeaconBlockHeader, -) -> Result<(), BlockError> { - let proposer_pubkey = get_validator_pubkey_cache(chain)? - .get(header.message.proposer_index as usize) - .cloned() - .ok_or_else(|| BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = chain - .with_head(|head| { - Ok(( - head.beacon_state.fork, - head.beacon_state.genesis_validators_root, - )) - }) - .map_err(|e: BlockError| e)?; - - if header.verify_signature::( - &proposer_pubkey, - &fork, - genesis_validators_root, - &chain.spec, - ) { - Ok(()) - } else { - Err(BlockError::ProposalSignatureInvalid) - } -} - /// Implemented on types that can be converted into a `FullyVerifiedBlock`. 
/// /// Used to allow functions to accept blocks at various stages of verification. @@ -1232,6 +1202,38 @@ fn get_signature_verifier<'a, E: EthSpec>( ) } +/// Verify that `header` was signed with a valid signature from its proposer. +/// +/// Return `Ok(())` if the signature is valid, and an `Err` otherwise. +fn verify_header_signature( + chain: &BeaconChain, + header: &SignedBeaconBlockHeader, +) -> Result<(), BlockError> { + let proposer_pubkey = get_validator_pubkey_cache(chain)? + .get(header.message.proposer_index as usize) + .cloned() + .ok_or_else(|| BlockError::UnknownValidator(header.message.proposer_index))?; + let (fork, genesis_validators_root) = chain + .with_head(|head| { + Ok(( + head.beacon_state.fork, + head.beacon_state.genesis_validators_root, + )) + }) + .map_err(|e: BlockError| e)?; + + if header.verify_signature::( + &proposer_pubkey, + &fork, + genesis_validators_root, + &chain.spec, + ) { + Ok(()) + } else { + Err(BlockError::ProposalSignatureInvalid) + } +} + fn expose_participation_metrics(summaries: &[EpochProcessingSummary]) { if !cfg!(feature = "participation_metrics") { return; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 9f6c615e03b..9eb39f0198a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -294,7 +294,10 @@ where self } - pub fn slasher_server(self) -> Result { + /// Immediately start the slasher service. + /// + /// Error if no slasher is configured. + pub fn start_slasher_server(&self) -> Result<(), String> { let context = self .runtime_context .as_ref() @@ -309,7 +312,7 @@ where .clone() .ok_or_else(|| "slasher server requires a slot clock")?; SlasherServer::run(slasher, slot_clock, &context.executor); - Ok(self) + Ok(()) } /// Immediately starts the service that periodically logs information each slot. @@ -411,6 +414,10 @@ where None }; + if self.slasher.is_some() { + self.start_slasher_server()?; + } + Ok(Client { beacon_chain: self.beacon_chain, network_globals: self.network_globals, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index c8bf1e19c1b..872127bf7bc 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -353,6 +353,27 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("slasher") .takes_value(true) ) + .arg( + Arg::with_name("slasher-history-length") + .long("slasher-history-length") + .help( + "Configure how many epochs of history the slasher keeps. Immutable after \ + initialization." + ) + .value_name("EPOCHS") + .requires("slasher") + .takes_value(true) + ) + .arg( + Arg::with_name("slasher-max-db-size") + .long("slasher-max-db-size") + .help( + "Maximum size of the LMDB database used by the slasher." + ) + .value_name("GIGABYTES") + .requires("slasher") + .takes_value(true) + ) .arg( Arg::with_name("wss-checkpoint") .long("wss-checkpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 5237ba9fdff..dfed76693e0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -350,6 +350,12 @@ pub fn get_config( slasher_config.update_period = update_period; } + if let Some(history_length) = + clap_utils::parse_optional(cli_args, "slasher-history-length")? 
+ { + slasher_config.history_length = history_length; + } + client_config.slasher = Some(slasher_config); } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 9f3e9a8eca6..331e779c474 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -138,12 +138,6 @@ impl ProductionBeaconNode { .http_api_config(client_config.http_api.clone()) .http_metrics_config(client_config.http_metrics.clone()); - // FIXME(sproul): chain this - let builder = if client_config.slasher.is_some() { - builder.slasher_server()? - } else { - builder - }; builder.build().map(Self) } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 2f6e463b3b8..e7c9a7c1573 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -7,6 +7,7 @@ pub const DEFAULT_CHUNK_SIZE: usize = 16; pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256; pub const DEFAULT_HISTORY_LENGTH: usize = 54_000; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; +pub const DEFAULT_MAX_DB_SIZE: usize = 256; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -17,6 +18,8 @@ pub struct Config { pub history_length: usize, /// Update frequency in seconds. pub update_period: u64, + /// Maximum size of the LMDB database in gigabytes. + pub max_db_size_gbs: usize, } impl Config { @@ -27,6 +30,7 @@ impl Config { validator_chunk_size: DEFAULT_VALIDATOR_CHUNK_SIZE, history_length: DEFAULT_HISTORY_LENGTH, update_period: DEFAULT_UPDATE_PERIOD, + max_db_size_gbs: DEFAULT_MAX_DB_SIZE, } } @@ -41,6 +45,12 @@ impl Config { } } + pub fn is_compatible(&self, other: &Config) -> bool { + self.chunk_size == other.chunk_size + && self.validator_chunk_size == other.validator_chunk_size + && self.history_length == other.history_length + } + pub fn chunk_index(&self, epoch: Epoch) -> usize { (epoch.as_usize() % self.history_length) / self.chunk_size } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index a46a9b78cec..c71a3455a33 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -12,28 +12,37 @@ use types::{ Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, Slot, }; +/// Current database schema version, to check compatibility of on-disk DB with software. +const CURRENT_SCHEMA_VERSION: u64 = 0; + +/// Metadata about the slashing database itself. +const METADATA_DB: &str = "metadata"; /// Map from `(target_epoch, validator_index)` to `AttesterRecord`. const ATTESTERS_DB: &str = "attesters"; /// Map from `indexed_attestation_hash` to `IndexedAttestation`. const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; +/// Table of minimum targets for every source epoch within range. const MIN_TARGETS_DB: &str = "min_targets"; +/// Table of maximum targets for every source epoch within range. const MAX_TARGETS_DB: &str = "max_targets"; -/// Map from `validator_index` to the `current_epoch` stored for that validator's min and max -/// target arrays. +/// Map from `validator_index` to the `current_epoch` for that validator. +/// +/// Used to implement wrap-around semantics for the min and max target arrays. const CURRENT_EPOCHS_DB: &str = "current_epochs"; /// Map from `(slot, validator_index)` to `SignedBeaconBlockHeader`. -const PROPOSER_DB: &str = "proposers"; -/// Metadata about the slashing database itself. -const METADATA_DB: &str = "metadata"; +const PROPOSERS_DB: &str = "proposers"; /// The number of DBs for LMDB to use (equal to the number of DBs defined above). 
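Editor's note: the metadata accessors added below persist the schema version and the slasher `Config` under fixed keys using `bincode`, and compare them against the running software's values on open. A minimal round-trip illustration with a stand-in struct (not the real `Config`):

```rust
use serde_derive::{Deserialize, Serialize};

// Stand-in config; only the shape of the serialize/deserialize round-trip matters here.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct MiniConfig {
    chunk_size: usize,
    validator_chunk_size: usize,
    history_length: usize,
}

fn main() -> Result<(), bincode::Error> {
    let schema_version: u64 = 0;
    let config = MiniConfig {
        chunk_size: 16,
        validator_chunk_size: 256,
        history_length: 4096,
    };

    // These byte strings are what ends up stored under the constant metadata keys.
    let version_bytes = bincode::serialize(&schema_version)?;
    let config_bytes = bincode::serialize(&config)?;

    // On open, they are read back and checked for compatibility with the current software.
    assert_eq!(bincode::deserialize::<u64>(&version_bytes)?, schema_version);
    assert_eq!(bincode::deserialize::<MiniConfig>(&config_bytes)?, config);
    Ok(())
}
```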
const LMDB_MAX_DBS: u32 = 7; -/// The size of the in-memory map for LMDB (larger than the maximum size of the database). -// FIXME(sproul): make this user configurable -const LMDB_MAP_SIZE: usize = 256 * (1 << 30); // 256GiB + +/// Constant key under which the schema version is stored in the `metadata_db`. +const METADATA_VERSION_KEY: &[u8] = &[0]; +/// Constant key under which the slasher configuration is stored in the `metadata_db`. +const METADATA_CONFIG_KEY: &[u8] = &[1]; const ATTESTER_KEY_SIZE: usize = 16; const PROPOSER_KEY_SIZE: usize = 16; +const GIGABYTE: usize = 1 << 30; #[derive(Debug)] pub struct SlasherDB { @@ -141,7 +150,7 @@ impl SlasherDB { std::fs::create_dir_all(&config.database_path)?; let env = Environment::new() .set_max_dbs(LMDB_MAX_DBS) - .set_map_size(LMDB_MAP_SIZE) + .set_map_size(config.max_db_size_gbs * GIGABYTE) .open_with_permissions(&config.database_path, 0o600)?; let indexed_attestation_db = env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; @@ -149,9 +158,10 @@ impl SlasherDB { let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; let current_epochs_db = env.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; - let proposers_db = env.create_db(Some(PROPOSER_DB), Self::db_flags())?; + let proposers_db = env.create_db(Some(PROPOSERS_DB), Self::db_flags())?; let metadata_db = env.create_db(Some(METADATA_DB), Self::db_flags())?; - Ok(Self { + + let db = Self { env, indexed_attestation_db, attesters_db, @@ -162,7 +172,32 @@ impl SlasherDB { metadata_db, config, _phantom: PhantomData, - }) + }; + + let mut txn = db.begin_rw_txn()?; + + if let Some(schema_version) = db.load_schema_version(&mut txn)? { + if schema_version != CURRENT_SCHEMA_VERSION { + return Err(Error::IncompatibleSchemaVersion { + database_schema_version: schema_version, + software_schema_version: CURRENT_SCHEMA_VERSION, + }); + } + } + db.store_schema_version(&mut txn)?; + + if let Some(on_disk_config) = db.load_config(&mut txn)? { + if !db.config.is_compatible(&on_disk_config) { + return Err(Error::ConfigIncompatible { + on_disk_config, + config: (*db.config).clone(), + }); + } + } + db.store_config(&mut txn)?; + txn.commit()?; + + Ok(db) } pub fn db_flags() -> DatabaseFlags { @@ -177,6 +212,42 @@ impl SlasherDB { Ok(self.env.begin_rw_txn()?) } + pub fn load_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result, Error> { + Ok(txn + .get(self.metadata_db, &METADATA_VERSION_KEY) + .optional()? + .map(bincode::deserialize) + .transpose()?) + } + + pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { + txn.put( + self.metadata_db, + &METADATA_VERSION_KEY, + &bincode::serialize(&CURRENT_SCHEMA_VERSION)?, + Self::write_flags(), + )?; + Ok(()) + } + + pub fn load_config(&self, txn: &mut RwTransaction<'_>) -> Result, Error> { + Ok(txn + .get(self.metadata_db, &METADATA_CONFIG_KEY) + .optional()? + .map(bincode::deserialize) + .transpose()?) 
+ } + + pub fn store_config(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { + txn.put( + self.metadata_db, + &METADATA_CONFIG_KEY, + &bincode::serialize(self.config.as_ref())?, + Self::write_flags(), + )?; + Ok(()) + } + pub fn get_current_epoch_for_validator( &self, validator_index: u64, diff --git a/slasher/src/error.rs b/slasher/src/error.rs index e123b571e91..cc7be695f96 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -1,3 +1,4 @@ +use crate::Config; use std::io; use types::{Epoch, Hash256}; @@ -9,10 +10,18 @@ pub enum Error { BincodeError(bincode::Error), ArithError(safe_arith::ArithError), ChunkIndexOutOfBounds(usize), + IncompatibleSchemaVersion { + database_schema_version: u64, + software_schema_version: u64, + }, ConfigInvalidChunkSize { chunk_size: usize, history_length: usize, }, + ConfigIncompatible { + on_disk_config: Config, + config: Config, + }, DistanceTooLarge, DistanceCalculationOverflow, /// Missing an attester record that we expected to exist. From cffd2d612f174e622583327ab30a5f49a884347d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Nov 2020 13:03:08 +1100 Subject: [PATCH 27/34] More configuration, docs --- beacon_node/src/cli.rs | 22 +++++++- beacon_node/src/config.rs | 14 +++++ book/src/SUMMARY.md | 1 + book/src/slasher.md | 112 ++++++++++++++++++++++++++++++++++++++ slasher/src/config.rs | 2 +- 5 files changed, 149 insertions(+), 2 deletions(-) create mode 100644 book/src/slasher.md diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 872127bf7bc..0874d4f98fc 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -339,7 +339,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help( "Set the slasher's database directory." ) - .value_name("DIR") + .value_name("PATH") .takes_value(true) .requires("slasher") ) @@ -374,6 +374,26 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("slasher") .takes_value(true) ) + .arg( + Arg::with_name("slasher-chunk-size") + .long("slasher-chunk-size") + .help( + "Number of epochs per validator per chunk stored on disk." + ) + .value_name("EPOCHS") + .requires("slasher") + .takes_value(true) + ) + .arg( + Arg::with_name("slasher-validator-chunk-size") + .long("slasher-validator-chunk-size") + .help( + "Number of validators per chunk stored on disk." + ) + .value_name("NUM_VALIDATORS") + .requires("slasher") + .takes_value(true) + ) .arg( Arg::with_name("wss-checkpoint") .long("wss-checkpoint") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index dfed76693e0..0efd7512c3f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -356,6 +356,20 @@ pub fn get_config( slasher_config.history_length = history_length; } + if let Some(max_db_size) = clap_utils::parse_optional(cli_args, "slasher-max-db-size")? { + slasher_config.max_db_size_gbs = max_db_size; + } + + if let Some(chunk_size) = clap_utils::parse_optional(cli_args, "slasher-chunk-size")? { + slasher_config.chunk_size = chunk_size; + } + + if let Some(validator_chunk_size) = + clap_utils::parse_optional(cli_args, "slasher-validator-chunk-size")? 
+ { + slasher_config.validator_chunk_size = validator_chunk_size; + } + client_config.slasher = Some(slasher_config); } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 82046da68ea..25e02bb925f 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -29,6 +29,7 @@ * [Database Configuration](./advanced_database.md) * [Local Testnets](./local-testnets.md) * [Advanced Networking](./advanced_networking.md) + * [Running a Slasher](./slasher.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/slasher.md b/book/src/slasher.md new file mode 100644 index 00000000000..0b4ff962d3e --- /dev/null +++ b/book/src/slasher.md @@ -0,0 +1,112 @@ +# Running a Slasher + +Lighthouse includes a slasher for identifying slashable offences comitted by other validators and +including proof of those offences in blocks. + +Running a slasher is a good way to contribute to the health of the network, and doing so can earn +extra income for your validators. However it is currently only recommended for expert users because +of the immaturity of the slasher UX and the extra resources required. + +## Minimum System Requirements + +* Quad-core CPU +* 16 GB RAM +* 256 GB solid state storage (in addition to space for the beacon node DB) + +## How to Run + +The slasher runs inside the same process as the beacon node, when enabled via the `--slasher` flag: + +``` +lighthouse bn --slasher --debug-level=debug +``` + +The slasher hooks into Lighthouse's block and attestation processing, and pushes messages into an +in-memory queue for regular processing. It will increase the CPU usage of the beacon node because it +verifies the signatures of otherwise invalid messages. When a slasher batch update runs, the +messages are filtered for relevancy, and all relevant messages are checked for slashings and written +to the slasher database. + +You **should** run with debug logs, so that you can see the slasher's internal machinations, and +provide logs to the devs should you encounter any bugs. + +## Configuration + +The slasher has several configuration options that control its functioning. + +### Database Directory + +* Flag: `--slasher-dir PATH` +* Argument: path to directory + +By default the slasher stores data in the `slasher_db` directory inside the beacon node's datadir, +e.g. `~/.lighthouse/{testnet}/beacon/slasher_db`. You can use this flag to change that storage +directory. + +### History Length + +* Flag: `--slasher-history-length EPOCHS` +* Argument: number of epochs +* Default: 8192 epochs + +The slasher stores data for the `history-length` most recent epochs. By default the history length +is set high in order to catch all validator misbehaviour since the last weak subjectivity +checkpoint. If you would like to reduce the resource requirements (particularly disk space), set the +history length to a lower value, although a lower history length may prevent your slasher from +finding some slashings. + +**Note:** See the `--slasher-max-db-size` section below to ensure that your disk space savings are +applied. The history length must be a multiple of the chunk size (default 16), and cannot be +changed after initialization. + +### Max Database Size + +* Flag: `--slasher-max-db-size GIGABYTES` +* Argument: maximum size of the database in gigabytes +* Default: 256 GB + +The slasher uses LMDB as its backing store, and LMDB will consume up to the maximum amount of disk +space allocated to it. 
By default the limit is set to accomodate the default history +length, but you can set it lower if running with a reduced history length. + +### Update Period + +* Flag: `--slasher-update-period SECONDS` +* Argument: number of seconds +* Default: 12 seconds + +Set the length of the time interval between each slasher batch update. You can check if your +slasher is keeping up with its update period by looking for a log message like this: + +``` +DEBG Completed slasher update num_blocks: 1, num_attestations: 279, time_taken: 1821ms, epoch: 20889, service: slasher +``` + +If the `time_taken` is substantially longer than the update period then it indicates your machine is +struggling under the load, and you should consider increasing the update period or lowering the +resource requirements by tweaking the history length. + +### Chunk Size and Validator Chunk Size + +* Flags: `--slasher-chunk-size EPOCHS`, `--slasher-validator-chunk-size NUM_VALIDATORS` +* Arguments: number of ecochs, number of validators +* Defaults: 16, 256 + +Adjusting these parameter should only be done in conjunction with reading in detail +about [how the slasher works][design-notes], and/or reading the source code. + +[design-notes]: https://hackmd.io/@sproul/min-max-slasher + +### Short-Range Example + +If you would like to run a lightweight slasher that just checks blocks and attestations within +the last day or so, you can use this combination of arguments: + +``` +lighthouse bn --slasher --slasher-history-length 256 --slasher-max-db-size 4 --debug-level=debug +``` + +## Stability Warning + +The slasher code is currently unstable, so we may update the schema of the slasher database in a +backwards-incompatible way which will require re-initialization. diff --git a/slasher/src/config.rs b/slasher/src/config.rs index e7c9a7c1573..5cc4ce9332b 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -5,7 +5,7 @@ use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256; -pub const DEFAULT_HISTORY_LENGTH: usize = 54_000; +pub const DEFAULT_HISTORY_LENGTH: usize = 8192; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_MAX_DB_SIZE: usize = 256; From 244954d065fd0948e2b0b18a7e85ceea00e1cff9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Nov 2020 14:33:01 +1100 Subject: [PATCH 28/34] Fix Clippy lints --- .../beacon_chain/src/block_verification.rs | 2 +- slasher/src/array.rs | 1 + slasher/src/attestation_queue.rs | 18 ++++++++++-------- slasher/src/block_queue.rs | 12 +++++------- slasher/src/slasher.rs | 4 ++-- 5 files changed, 19 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 04b6efef345..a76722afd56 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -283,7 +283,7 @@ impl BlockSlashInfo> { BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), // `InvalidSignature` could indicate any signature in the block, so we want // to recheck the proposer signature alone. 
- BlockError::InvalidSignature | _ => BlockSlashInfo::SignatureNotChecked(header, e), + _ => BlockSlashInfo::SignatureNotChecked(header, e), } } } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 5fda1c79dd3..48938d87d40 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -554,6 +554,7 @@ pub fn epoch_update_for_validator( Ok(()) } +#[allow(clippy::type_complexity)] pub fn update_array( db: &SlasherDB, txn: &mut RwTransaction<'_>, diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 5f6fdf8a301..70ea2ea13e8 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -7,7 +7,7 @@ use types::{EthSpec, IndexedAttestation}; /// Staging area for attestations received from the network. /// /// To be added to the database in batches, for efficiency and to prevent data races. -#[derive(Debug)] +#[derive(Debug, Default)] pub struct AttestationQueue { /// All attestations (unique) for storage on disk. pub queue: Mutex>, @@ -30,6 +30,10 @@ impl AttestationBatch { self.attestations.len() } + pub fn is_empty(&self) -> bool { + self.attestations.is_empty() + } + /// Group the attestations by validator index. pub fn group_by_validator_index(self, config: &Config) -> GroupedAttestations { let mut grouped_attestations = GroupedAttestations { subqueues: vec![] }; @@ -62,12 +66,6 @@ impl AttestationBatch { } impl AttestationQueue { - pub fn new() -> Self { - Self { - queue: Mutex::new(AttestationBatch::default()), - } - } - /// Add an attestation to the queue. pub fn queue(&self, attestation: IndexedAttestation) { let attester_record = AttesterRecord::from(attestation.clone()); @@ -78,7 +76,7 @@ impl AttestationQueue { } pub fn dequeue(&self) -> AttestationBatch { - std::mem::replace(&mut self.queue.lock(), AttestationBatch::default()) + std::mem::take(&mut self.queue.lock()) } pub fn requeue(&self, batch: AttestationBatch) { @@ -88,4 +86,8 @@ impl AttestationQueue { pub fn len(&self) -> usize { self.queue.lock().len() } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } } diff --git a/slasher/src/block_queue.rs b/slasher/src/block_queue.rs index b80d0fb07bb..10086ce3757 100644 --- a/slasher/src/block_queue.rs +++ b/slasher/src/block_queue.rs @@ -1,18 +1,12 @@ use parking_lot::Mutex; use types::SignedBeaconBlockHeader; -#[derive(Debug)] +#[derive(Debug, Default)] pub struct BlockQueue { blocks: Mutex>, } impl BlockQueue { - pub fn new() -> Self { - BlockQueue { - blocks: Mutex::new(vec![]), - } - } - pub fn queue(&self, block_header: SignedBeaconBlockHeader) { self.blocks.lock().push(block_header) } @@ -25,4 +19,8 @@ impl BlockQueue { pub fn len(&self) -> usize { self.blocks.lock().len() } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 1994aa6d0bc..5e1c950241d 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -29,8 +29,8 @@ impl Slasher { let db = SlasherDB::open(config.clone())?; let attester_slashings = Mutex::new(HashSet::new()); let proposer_slashings = Mutex::new(HashSet::new()); - let attestation_queue = AttestationQueue::new(); - let block_queue = BlockQueue::new(); + let attestation_queue = AttestationQueue::default(); + let block_queue = BlockQueue::default(); Ok(Self { db, attester_slashings, From e07596379b2ea40ba495c3b38e7c194c626a25b1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Nov 2020 14:42:47 +1100 Subject: [PATCH 29/34] Remove unused benchmark --- Cargo.lock | 2 -- slasher/Cargo.toml | 
8 +------- slasher/benches/blake2b.rs | 32 -------------------------------- 3 files changed, 1 insertion(+), 41 deletions(-) delete mode 100644 slasher/benches/blake2b.rs diff --git a/Cargo.lock b/Cargo.lock index 6589e7e2d61..7b819c2fa4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5305,8 +5305,6 @@ name = "slasher" version = "0.1.0" dependencies = [ "bincode", - "blake2b_simd", - "byte-slice-cast", "byteorder", "criterion", "eth2_ssz", diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 4dd2ee76401..4ffbb59e3f0 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -6,10 +6,7 @@ edition = "2018" [dependencies] bincode = "1.3.1" -blake2b_simd = "0.5.10" -byte-slice-cast = "0.3.5" byteorder = "1.3.4" -task_executor = { path = "../common/task_executor" } eth2_ssz = { path = "../consensus/ssz" } eth2_ssz_derive = { path = "../consensus/ssz_derive" } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } @@ -23,6 +20,7 @@ serde_derive = "1.0" slog = "2.5.2" sloggers = "*" slot_clock = { path = "../common/slot_clock" } +task_executor = { path = "../common/task_executor" } tokio = { version = "0.2.21", features = ["full"] } tree_hash = { path = "../consensus/tree_hash" } tree_hash_derive = { path = "../consensus/tree_hash_derive" } @@ -36,7 +34,3 @@ tempdir = "0.3.7" [features] test_logger = [] - -[[bench]] -name = "blake2b" -harness = false diff --git a/slasher/benches/blake2b.rs b/slasher/benches/blake2b.rs deleted file mode 100644 index 8bdfdc6db3c..00000000000 --- a/slasher/benches/blake2b.rs +++ /dev/null @@ -1,32 +0,0 @@ -use blake2b_simd::{Hash, Params}; -use byte_slice_cast::AsByteSlice; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use rand::{thread_rng, Rng}; - -const CHUNK_SIZE: usize = 2048; -type Chunk = [u16; CHUNK_SIZE]; - -fn blake2b(data: &Chunk) -> Hash { - let mut params = Params::new(); - params.hash_length(16); - params.hash(data.as_byte_slice()) -} - -fn make_random_chunk() -> Chunk { - let mut chunk = [0; CHUNK_SIZE]; - thread_rng().fill(&mut chunk[..]); - chunk -} - -pub fn uniform_chunk(c: &mut Criterion) { - let chunk = [33; CHUNK_SIZE]; - c.bench_function("uniform_chunk", |b| b.iter(|| blake2b(&black_box(chunk)))); -} - -pub fn random_chunk(c: &mut Criterion) { - let chunk = make_random_chunk(); - c.bench_function("random_chunk", |b| b.iter(|| blake2b(&black_box(chunk)))); -} - -criterion_group!(benches, uniform_chunk, random_chunk); -criterion_main!(benches); From 4790aff3dde68376c09fb3d727b36124a2533719 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Nov 2020 15:44:46 +1100 Subject: [PATCH 30/34] Self-review: docs, etc --- Cargo.lock | 1 - .../beacon_chain/src/attestation_verification.rs | 15 ++++++++++++++- beacon_node/beacon_chain/src/beacon_chain.rs | 2 ++ .../beacon_chain/src/block_verification.rs | 10 ++++++++-- beacon_node/src/lib.rs | 8 ++++---- consensus/types/src/signed_beacon_block.rs | 1 + consensus/types/src/signed_beacon_block_header.rs | 3 ++- slasher/Cargo.toml | 1 - slasher/src/slasher.rs | 8 ++++++-- 9 files changed, 37 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b819c2fa4a..f95e2d4e075 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5306,7 +5306,6 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", - "criterion", "eth2_ssz", "eth2_ssz_derive", "flate2", diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 739c33a3584..0d4e1545086 100644 --- 
a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -259,9 +259,9 @@ impl From for Error { } } +/// Errors that may occur while verifying an attestation for consumption by the slasher. #[derive(Debug)] enum SlasherVerificationError { - /// There was an error while verifying the indexed attestation for the slasher. SignatureError(BlockOperationError), BeaconChainError(BeaconChainError), } @@ -327,6 +327,12 @@ pub enum AttestationSlashInfo { SignatureValid(IndexedAttestation, TErr), } +/// After processing an attestation normally, optionally process it further for the slasher. +/// +/// This maps an `AttestationSlashInfo` error back into a regular `Error`, performing signature +/// checks on attestations that failed verification for other reasons. +/// +/// No substantial extra work will be done if there is no slasher configured. fn process_slash_info( slash_info: AttestationSlashInfo, chain: &BeaconChain, @@ -404,6 +410,7 @@ impl VerifiedAggregatedAttestation { .map_err(|slash_info| process_slash_info(slash_info, chain)) } + /// Run the checks that happen before an indexed attestation is constructed. fn verify_early_checks( signed_aggregate: &SignedAggregateAndProof, chain: &BeaconChain, @@ -472,6 +479,7 @@ impl VerifiedAggregatedAttestation { } } + /// Run the checks that happen after the indexed attestation and signature have been checked. fn verify_late_checks( signed_aggregate: &SignedAggregateAndProof, attestation_root: Hash256, @@ -510,6 +518,7 @@ impl VerifiedAggregatedAttestation { Ok(()) } + /// Verify the attestation, producing extra information about whether it might be slashable. pub fn verify_slashable( signed_aggregate: SignedAggregateAndProof, chain: &BeaconChain, @@ -591,6 +600,7 @@ impl VerifiedAggregatedAttestation { } impl VerifiedUnaggregatedAttestation { + /// Run the checks that happen before an indexed attestation is constructed. pub fn verify_early_checks( attestation: &Attestation, chain: &BeaconChain, @@ -631,6 +641,7 @@ impl VerifiedUnaggregatedAttestation { Ok(()) } + /// Run the checks that apply to the indexed attestation before the signature is checked. pub fn verify_middle_checks( attestation: &Attestation, indexed_attestation: &IndexedAttestation, @@ -678,6 +689,7 @@ impl VerifiedUnaggregatedAttestation { Ok((validator_index, expected_subnet_id)) } + /// Run the checks that apply after the signature has been checked. fn verify_late_checks( attestation: &Attestation, validator_index: u64, @@ -722,6 +734,7 @@ impl VerifiedUnaggregatedAttestation { .map_err(|slash_info| process_slash_info(slash_info, chain)) } + /// Verify the attestation, producing extra information about whether it might be slashable. pub fn verify_slashable( attestation: Attestation, subnet_id: Option, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f44018654d2..64cd758abc5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1085,6 +1085,7 @@ impl BeaconChain { Ok(signed_aggregate) } + /// Move slashings collected by the slasher into the op pool for block inclusion. fn ingest_slashings_to_op_pool(&self, state: &BeaconState) { if let Some(slasher) = self.slasher.as_ref() { let attester_slashings = slasher.get_attester_slashings(); @@ -1580,6 +1581,7 @@ impl BeaconChain { metrics::stop_timer(attestation_observation_timer); + // If a slasher is configured, provide the attestations from the block. 
if let Some(slasher) = self.slasher.as_ref() { for attestation in &signed_block.message.body.attestations { let committee = diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a76722afd56..8192975d1c8 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -268,10 +268,11 @@ impl From for BlockError { } } +/// Information about invalid blocks which might still be slashable despite being invalid. pub enum BlockSlashInfo { - /// The block is invalid, but its signature wasn't checked. + /// The block is invalid, but its proposer signature wasn't checked. SignatureNotChecked(SignedBeaconBlockHeader, TErr), - /// The block's signature is invalid, so it will never be slashable. + /// The block's proposer signature is invalid, so it will never be slashable. SignatureInvalid(TErr), /// The signature is valid but the attestation is invalid in some other way. SignatureValid(SignedBeaconBlockHeader, TErr), @@ -396,12 +397,14 @@ pub trait IntoFullyVerifiedBlock: Sized { ) -> Result, BlockError> { self.into_fully_verified_block_slashable(chain) .map(|fully_verified| { + // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { slasher.accept_block_header(fully_verified.block.signed_block_header()); } fully_verified }) .map_err(|slash_info| { + // Process invalid blocks to see if they are suitable for the slasher. if let Some(slasher) = chain.slasher.as_ref() { let (verified_header, error) = match slash_info { BlockSlashInfo::SignatureNotChecked(header, e) => { @@ -427,6 +430,7 @@ pub trait IntoFullyVerifiedBlock: Sized { }) } + /// Convert the block to fully-verified form while producing data to aid checking slashability. fn into_fully_verified_block_slashable( self, chain: &BeaconChain, @@ -614,6 +618,7 @@ impl SignatureVerifiedBlock { } } + /// As for `new` above but prodcuding `BlockSlashInfo`. pub fn check_slashable( block: SignedBeaconBlock, chain: &BeaconChain, @@ -654,6 +659,7 @@ impl SignatureVerifiedBlock { } } + /// Same as `from_gossip_verified_block` but producing slashing-relevant data as well. pub fn from_gossip_verified_block_check_slashable( from: GossipVerifiedBlock, chain: &BeaconChain, diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 331e779c474..d140598198b 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -130,15 +130,15 @@ impl ProductionBeaconNode { let discv5_executor = Discv5Executor(executor); client_config.network.discv5_config.executor = Some(Box::new(discv5_executor)); - let builder = builder + builder .build_beacon_chain()? .network(&client_config.network) .await? .notifier()? .http_api_config(client_config.http_api.clone()) - .http_metrics_config(client_config.http_metrics.clone()); - - builder.build().map(Self) + .http_metrics_config(client_config.http_metrics.clone()) + .build() + .map(Self) } pub fn into_inner(self) -> ProductionClient { diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 489dd252856..cd2e8507221 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -82,6 +82,7 @@ impl SignedBeaconBlock { self.signature.verify(pubkey, message) } + /// Produce a signed beacon block header corresponding to this block. 
pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { SignedBeaconBlockHeader { message: self.message.block_header(), diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index 0e4f9fb0969..b35765942b2 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -25,7 +25,7 @@ pub struct SignedBeaconBlockHeader { /// /// Guarantees `header1 == header2 -> hash(header1) == hash(header2)`. /// -/// Used in the operation pool. +/// Used in the slasher. impl Hash for SignedBeaconBlockHeader { fn hash(&self, state: &mut H) { self.message.hash(state); @@ -34,6 +34,7 @@ impl Hash for SignedBeaconBlockHeader { } impl SignedBeaconBlockHeader { + /// Verify that this block header was signed by `pubkey`. pub fn verify_signature( &self, pubkey: &PublicKey, diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 4ffbb59e3f0..108c2e149a8 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -27,7 +27,6 @@ tree_hash_derive = { path = "../consensus/tree_hash_derive" } types = { path = "../consensus/types" } [dev-dependencies] -criterion = "0.3" maplit = "1.0.2" rayon = "1.3.0" tempdir = "0.3.7" diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 5e1c950241d..201ebeb7fe3 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -42,12 +42,14 @@ impl Slasher { }) } + /// Harvest all attester slashings found, removing them from the slasher. pub fn get_attester_slashings(&self) -> HashSet> { - std::mem::replace(&mut self.attester_slashings.lock(), HashSet::new()) + std::mem::take(&mut self.attester_slashings.lock()) } + /// Harvest all proposer slashings found, removing them from the slasher. pub fn get_proposer_slashings(&self) -> HashSet { - std::mem::replace(&mut self.proposer_slashings.lock(), HashSet::new()) + std::mem::take(&mut self.proposer_slashings.lock()) } pub fn config(&self) -> &Config { @@ -238,6 +240,8 @@ impl Slasher { /// /// Drop any attestations that are too old to ever be relevant, and return any attestations /// that might be valid in the future. + /// + /// Returns `(valid, deferred, num_dropped)`. fn validate( &self, batch: AttestationBatch, From 2c7e44422f12e7eeab14e86800ff60aa00fd4ce9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 10 Nov 2020 17:32:04 +1100 Subject: [PATCH 31/34] Prune even if batch processing fails --- slasher/src/slasher.rs | 2 -- slasher/src/slasher_server.rs | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 201ebeb7fe3..d5772120083 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -283,8 +283,6 @@ impl Slasher { } /// Prune unnecessary attestations and blocks from the on-disk database. - /// - /// Must only be called after `process_queued(current_epoch)`. pub fn prune_database(&self, current_epoch: Epoch) -> Result<(), Error> { self.db.prune(current_epoch) } diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 673b01f5d10..c418a143c87 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -58,8 +58,9 @@ impl SlasherServer { "epoch" => current_epoch, "error" => format!("{:?}", e) ); - continue; } + // Prune the database, even in the case where batch processing failed. + // If the LMDB database is full then pruning could help to free it up. 
if let Err(e) = slasher.prune_database(current_epoch) { error!( slasher.log, From fdbfebdc5445067bcf470d80814fecced741fc3a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 18 Nov 2020 12:36:41 +1100 Subject: [PATCH 32/34] Address Paul's review comments --- .../src/attestation_verification.rs | 29 ++----------------- .../beacon_chain/src/block_verification.rs | 2 +- beacon_node/src/cli.rs | 4 ++- book/src/slasher.md | 27 +++++++++++++---- slasher/src/config.rs | 12 ++++++-- slasher/src/error.rs | 3 ++ 6 files changed, 40 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 0d4e1545086..262f0abadfb 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -39,13 +39,9 @@ use bls::verify_signature_sets; use proto_array::Block as ProtoBlock; use slog::debug; use slot_clock::SlotClock; -use state_processing::per_block_processing::is_valid_indexed_attestation; use state_processing::{ common::get_indexed_attestation, - per_block_processing::{ - errors::{AttestationValidationError, BlockOperationError, IndexedAttestationInvalid}, - VerifySignatures, - }, + per_block_processing::errors::AttestationValidationError, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, @@ -259,19 +255,6 @@ impl From for Error { } } -/// Errors that may occur while verifying an attestation for consumption by the slasher. -#[derive(Debug)] -enum SlasherVerificationError { - SignatureError(BlockOperationError), - BeaconChainError(BeaconChainError), -} - -impl From for SlasherVerificationError { - fn from(e: BeaconChainError) -> Self { - Self::BeaconChainError(e) - } -} - /// Wraps a `SignedAggregateAndProof` that has been verified for propagation on the gossip network. pub struct VerifiedAggregatedAttestation { signed_aggregate: SignedAggregateAndProof, @@ -361,15 +344,7 @@ fn process_slash_info( }; if check_signature { - if let Err(e) = chain.with_head(|head| { - is_valid_indexed_attestation( - &head.beacon_state, - &indexed_attestation, - VerifySignatures::True, - &chain.spec, - ) - .map_err(SlasherVerificationError::SignatureError) - }) { + if let Err(e) = verify_attestation_signature(chain, &indexed_attestation) { debug!( chain.log, "Signature verification for slasher failed"; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 8192975d1c8..3927012aa28 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -618,7 +618,7 @@ impl SignatureVerifiedBlock { } } - /// As for `new` above but prodcuding `BlockSlashInfo`. + /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( block: SignedBeaconBlock, chain: &BeaconChain, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0874d4f98fc..0d81a9a998b 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -329,7 +329,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("slasher") .long("slasher") .help( - "Run a slasher alongside the beacon node [EXPERIMENTAL]." + "Run a slasher alongside the beacon node. It is currently only recommended for \ + expert users because of the immaturity of the slasher UX and the extra \ + resources required." 
) .takes_value(false) ) diff --git a/book/src/slasher.md b/book/src/slasher.md index 0b4ff962d3e..0be9a65f64e 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -18,7 +18,7 @@ of the immaturity of the slasher UX and the extra resources required. The slasher runs inside the same process as the beacon node, when enabled via the `--slasher` flag: ``` -lighthouse bn --slasher --debug-level=debug +lighthouse bn --slasher --debug-level debug ``` The slasher hooks into Lighthouse's block and attestation processing, and pushes messages into an @@ -47,7 +47,7 @@ directory. * Flag: `--slasher-history-length EPOCHS` * Argument: number of epochs -* Default: 8192 epochs +* Default: 4096 epochs The slasher stores data for the `history-length` most recent epochs. By default the history length is set high in order to catch all validator misbehaviour since the last weak subjectivity @@ -66,8 +66,23 @@ changed after initialization. * Default: 256 GB The slasher uses LMDB as its backing store, and LMDB will consume up to the maximum amount of disk -space allocated to it. By default the limit is set to accomodate the default history -length, but you can set it lower if running with a reduced history length. +space allocated to it. By default the limit is set to accommodate the default history length and +around 150K validators but you can set it lower if running with a reduced history length. The space +required scales approximately linearly in validator count and history length, i.e. if you halve +either you can halve the space required. + +If you want a better estimate you can use this formula: + +``` +352 * V * N + (16 * V * N)/(C * K) + 15000 * N +``` + +where + +* `V` is the validator count +* `N` is the history length +* `C` is the chunk size +* `K` is the validator chunk size @@ -103,10 +118,10 @@ If you would like to run a lightweight slasher that just checks blocks and attes the last day or so, you can use this combination of arguments: ``` -lighthouse bn --slasher --slasher-history-length 256 --slasher-max-db-size 4 --debug-level=debug +lighthouse bn --slasher --slasher-history-length 256 --slasher-max-db-size 16 --debug-level debug ``` ## Stability Warning -The slasher code is currently unstable, so we may update the schema of the slasher database in a +The slasher code is still quite new, so we may update the schema of the slasher database in a backwards-incompatible way which will require re-initialization. 
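As a quick sanity check of the formula documented above, here is a minimal standalone Rust sketch (not taken from the Lighthouse source) that evaluates it for the defaults shown in this section: history length 4096, chunk size 16, validator chunk size 256, and the roughly 150K validators assumed by the default 256 GB limit:

```
/// Evaluate the book's size estimate: 352 * V * N + (16 * V * N) / (C * K) + 15000 * N.
fn estimate_db_size_bytes(v: u64, n: u64, c: u64, k: u64) -> u64 {
    352 * v * n + (16 * v * n) / (c * k) + 15_000 * n
}

fn main() {
    // Defaults: N = 4096 epochs, C = 16, K = 256, with ~150K validators (V).
    let bytes = estimate_db_size_bytes(150_000, 4096, 16, 256);
    // Prints roughly 216 GB, which fits within the 256 GB default limit.
    println!("estimated slasher DB size: {:.1} GB", bytes as f64 / 1e9);
}
```

Halving either the validator count or the history length roughly halves the estimate, which matches the linear scaling described above.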
diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 5cc4ce9332b..dba2e604ebb 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -5,7 +5,7 @@ use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256; -pub const DEFAULT_HISTORY_LENGTH: usize = 8192; +pub const DEFAULT_HISTORY_LENGTH: usize = 4096; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_MAX_DB_SIZE: usize = 256; @@ -35,7 +35,15 @@ impl Config { } pub fn validate(&self) -> Result<(), Error> { - if self.history_length % self.chunk_size != 0 { + if self.chunk_size == 0 + || self.validator_chunk_size == 0 + || self.history_length == 0 + || self.max_db_size_gbs == 0 + { + Err(Error::ConfigInvalidZeroParameter { + config: self.clone(), + }) + } else if self.history_length % self.chunk_size != 0 { Err(Error::ConfigInvalidChunkSize { chunk_size: self.chunk_size, history_length: self.history_length, diff --git a/slasher/src/error.rs b/slasher/src/error.rs index cc7be695f96..f1c8f727e99 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -18,6 +18,9 @@ pub enum Error { chunk_size: usize, history_length: usize, }, + ConfigInvalidZeroParameter { + config: Config, + }, ConfigIncompatible { on_disk_config: Config, config: Config, From 756b9aee231560e989331e2318c72e0716b5c16c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 18 Nov 2020 17:09:47 +1100 Subject: [PATCH 33/34] Fix bug in block pruning, add metrics --- Cargo.lock | 2 ++ slasher/Cargo.toml | 2 ++ slasher/src/database.rs | 20 ++++++++++---- slasher/src/lib.rs | 1 + slasher/src/metrics.rs | 21 +++++++++++++++ slasher/src/slasher.rs | 8 ++++++ slasher/src/slasher_server.rs | 5 ++++ slasher/src/test_utils.rs | 17 ++++++++++-- slasher/tests/proposer_slashings.rs | 29 ++++++++------------ slasher/tests/random.rs | 41 +++++++++++++++++++++++++++-- 10 files changed, 119 insertions(+), 27 deletions(-) create mode 100644 slasher/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index 02c9b578a11..9f3dbf51153 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5366,6 +5366,8 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "flate2", + "lazy_static", + "lighthouse_metrics", "lmdb", "lmdb-sys", "maplit", diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 108c2e149a8..a47b303130a 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -10,6 +10,8 @@ byteorder = "1.3.4" eth2_ssz = { path = "../consensus/ssz" } eth2_ssz_derive = { path = "../consensus/ssz_derive" } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } +lazy_static = "1.4.0" +lighthouse_metrics = { path = "../common/lighthouse_metrics" } lmdb = "0.8" lmdb-sys = "0.8" parking_lot = "0.11.0" diff --git a/slasher/src/database.rs b/slasher/src/database.rs index c71a3455a33..8899d05479a 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -443,10 +443,12 @@ impl SlasherDB { let mut cursor = txn.open_rw_cursor(self.proposers_db)?; // Position cursor at first key, bailing out if the database is empty. - match cursor.get(None, None, lmdb_sys::MDB_FIRST) { - Ok(_) => (), - Err(lmdb::Error::NotFound) => return Ok(()), - Err(e) => return Err(e.into()), + if cursor + .get(None, None, lmdb_sys::MDB_FIRST) + .optional()? 
+ .is_none() + { + return Ok(()); } loop { @@ -458,7 +460,15 @@ impl SlasherDB { let (slot, _) = ProposerKey::parse(key_bytes)?; if slot < min_slot { cursor.del(Self::write_flags())?; - cursor.get(None, None, lmdb_sys::MDB_NEXT)?; + + // End the loop if there is no next entry. + if cursor + .get(None, None, lmdb_sys::MDB_NEXT) + .optional()? + .is_none() + { + break; + } } else { break; } diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 50bb328b6b6..d173f26a02b 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -7,6 +7,7 @@ mod block_queue; pub mod config; mod database; mod error; +mod metrics; mod slasher; mod slasher_server; pub mod test_utils; diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs new file mode 100644 index 00000000000..920a00e2905 --- /dev/null +++ b/slasher/src/metrics.rs @@ -0,0 +1,21 @@ +use lazy_static::lazy_static; +pub use lighthouse_metrics::*; + +lazy_static! { + pub static ref SLASHER_RUN_TIME: Result = try_create_histogram( + "slasher_process_batch_time", + "Time taken to process a batch of blocks and attestations" + ); + pub static ref SLASHER_NUM_ATTESTATIONS_DROPPED: Result = try_create_int_gauge( + "slasher_num_attestations_dropped", + "Number of attestations dropped per batch" + ); + pub static ref SLASHER_NUM_ATTESTATIONS_DEFERRED: Result = try_create_int_gauge( + "slasher_num_attestations_deferred", + "Number of attestations deferred per batch" + ); + pub static ref SLASHER_NUM_ATTESTATIONS_VALID: Result = try_create_int_gauge( + "slasher_num_attestations_valid", + "Number of valid attestations per batch" + ); +} diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index d5772120083..7a3a5be9bcf 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -1,3 +1,7 @@ +use crate::metrics::{ + self, SLASHER_NUM_ATTESTATIONS_DEFERRED, SLASHER_NUM_ATTESTATIONS_DROPPED, + SLASHER_NUM_ATTESTATIONS_VALID, +}; use crate::{ array, AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, ProposerSlashingStatus, SlasherDB, @@ -121,6 +125,10 @@ impl Slasher { "num_deferred" => num_deferred, "num_dropped" => num_dropped, ); + metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_VALID, snapshot.len() as i64); + metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_DEFERRED, num_deferred as i64); + metrics::set_gauge(&SLASHER_NUM_ATTESTATIONS_DROPPED, num_dropped as i64); + for attestation in snapshot.attestations.iter() { self.db.store_indexed_attestation( txn, diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index c418a143c87..4b20095f7da 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -1,3 +1,4 @@ +use crate::metrics::{self, SLASHER_RUN_TIME}; use crate::Slasher; use slog::{debug, error, info, trace}; use slot_clock::SlotClock; @@ -51,6 +52,8 @@ impl SlasherServer { let t = Instant::now(); let num_attestations = slasher.attestation_queue.len(); let num_blocks = slasher.block_queue.len(); + + let batch_timer = metrics::start_timer(&SLASHER_RUN_TIME); if let Err(e) = slasher.process_queued(current_epoch) { error!( slasher.log, @@ -59,6 +62,8 @@ impl SlasherServer { "error" => format!("{:?}", e) ); } + drop(batch_timer); + // Prune the database, even in the case where batch processing failed. // If the LMDB database is full then pruning could help to free it up. 
if let Err(e) = slasher.prune_database(current_epoch) { diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index e69105d5bd0..bd3b06c5272 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -3,8 +3,8 @@ use sloggers::Build; use std::collections::HashSet; use std::iter::FromIterator; use types::{ - AggregateSignature, AttestationData, AttesterSlashing, Checkpoint, Epoch, Hash256, - IndexedAttestation, MainnetEthSpec, Slot, + AggregateSignature, AttestationData, AttesterSlashing, BeaconBlockHeader, Checkpoint, Epoch, + Hash256, IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot, }; pub type E = MainnetEthSpec; @@ -100,3 +100,16 @@ pub fn slashed_validators_from_attestations( } slashed_validators } + +pub fn block(slot: u64, proposer_index: u64, block_root: u64) -> SignedBeaconBlockHeader { + SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot: Slot::new(slot), + proposer_index, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::from_low_u64_be(block_root), + }, + signature: Signature::empty(), + } +} diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index 5a8f60013af..2f303ad35bf 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,23 +1,16 @@ -use slasher::{test_utils::logger, Config, Slasher}; -use tempdir::TempDir; -use types::{ - BeaconBlockHeader, Epoch, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlockHeader, - Slot, +use slasher::{ + test_utils::{block as test_block, logger, E}, + Config, Slasher, }; +use tempdir::TempDir; +use types::{Epoch, EthSpec}; -type E = MainnetEthSpec; - -fn test_block(slot: u64, proposer_index: u64, block_root: u64) -> SignedBeaconBlockHeader { - SignedBeaconBlockHeader { - message: BeaconBlockHeader { - slot: Slot::new(slot), - proposer_index, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body_root: Hash256::from_low_u64_be(block_root), - }, - signature: Signature::empty(), - } +#[test] +fn empty_pruning() { + let tempdir = TempDir::new("slasher").unwrap(); + let config = Config::new(tempdir.path().into()); + let slasher = Slasher::::open(config.clone(), logger()).unwrap(); + slasher.prune_database(Epoch::new(0)).unwrap(); } #[test] diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 870e674921c..251cbaa6839 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -2,20 +2,21 @@ use rand::prelude::*; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; use slasher::{ test_utils::{ - indexed_att, logger, slashed_validators_from_attestations, + block, indexed_att, logger, slashed_validators_from_attestations, slashed_validators_from_slashings, E, }, Config, Slasher, }; use std::cmp::max; use tempdir::TempDir; -use types::Epoch; +use types::{Epoch, EthSpec}; #[derive(Debug)] struct TestConfig { num_validators: usize, max_attestations: usize, check_slashings: bool, + add_blocks: bool, } impl Default for TestConfig { @@ -24,6 +25,7 @@ impl Default for TestConfig { num_validators: 4, max_attestations: 50, check_slashings: false, + add_blocks: false, } } } @@ -87,6 +89,14 @@ fn random_test(seed: u64, test_config: TestConfig) { // Supply to slasher slasher.accept_attestation(attestation); + // Maybe add a random block too + if test_config.add_blocks && rng.gen_bool(0.1) { + let slot = rng.gen_range(0, 1 + 3 * current_epoch.as_u64() * E::slots_per_epoch() / 2); + let proposer = rng.gen_range(0, num_validators as 
u64); + let block_root = rng.gen_range(0, 2); + slasher.accept_block_header(block(slot, proposer, block_root)); + } + // Maybe process if rng.gen_bool(0.1) { slasher.process_queued(current_epoch).unwrap(); @@ -129,6 +139,22 @@ fn no_crash() { } } +// Fuzz-like test that runs forever on different seeds looking for crashes. +#[test] +#[ignore] +fn no_crash_with_blocks() { + let mut rng = thread_rng(); + loop { + random_test( + rng.gen(), + TestConfig { + add_blocks: true, + ..TestConfig::default() + }, + ); + } +} + // Fuzz-like test that runs forever on different seeds looking for missed slashings. #[test] #[ignore] @@ -194,3 +220,14 @@ fn no_crash_example2() { fn no_crash_example3() { random_test(3, TestConfig::default()); } + +#[test] +fn no_crash_blocks_example1() { + random_test( + 1, + TestConfig { + add_blocks: true, + ..TestConfig::default() + }, + ); +} From 32f73900084fa6e9c45c3f17091cf56213c48d76 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 19 Nov 2020 12:31:22 +1100 Subject: [PATCH 34/34] Add more metrics --- Cargo.lock | 2 ++ beacon_node/store/Cargo.toml | 1 + beacon_node/store/src/metrics.rs | 16 +--------------- common/directory/src/lib.rs | 20 +++++++++++++++++++- common/lighthouse_metrics/src/lib.rs | 8 +++++++- slasher/Cargo.toml | 1 + slasher/src/array.rs | 20 ++++++++++++++++++++ slasher/src/metrics.rs | 17 +++++++++++++++++ slasher/src/slasher.rs | 4 +++- slasher/src/slasher_server.rs | 6 +++++- 10 files changed, 76 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9f3dbf51153..29980974207 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5363,6 +5363,7 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", + "directory", "eth2_ssz", "eth2_ssz_derive", "flate2", @@ -5689,6 +5690,7 @@ version = "0.2.0" dependencies = [ "criterion", "db-key", + "directory", "eth2_ssz", "eth2_ssz_derive", "itertools 0.9.0", diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index a0fa4c24e52..81c94949517 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -30,3 +30,4 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lru = "0.6.0" sloggers = "1.0.1" +directory = { path = "../../common/directory" } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 826712a72c9..72c5e61969e 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,6 +1,6 @@ pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; -use std::fs; +use directory::size_of_dir; use std::path::Path; lazy_static! { @@ -134,17 +134,3 @@ pub fn scrape_for_metrics(db_path: &Path, freezer_db_path: &Path) { let freezer_db_size = size_of_dir(freezer_db_path); set_gauge(&FREEZER_DB_SIZE, freezer_db_size as i64); } - -fn size_of_dir(path: &Path) -> u64 { - if let Ok(iter) = fs::read_dir(path) { - iter.filter_map(std::result::Result::ok) - .map(size_of_dir_entry) - .sum() - } else { - 0 - } -} - -fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { - dir.metadata().map(|m| m.len()).unwrap_or(0) -} diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index 765fdabd621..1036e828970 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; pub use eth2_testnet_config::DEFAULT_HARDCODED_TESTNET; -use std::fs::create_dir_all; +use std::fs::{self, create_dir_all}; use std::path::{Path, PathBuf}; /// Names for the default directories. 
@@ -58,3 +58,21 @@ pub fn parse_path_or_default_with_flag( .join(flag), ) } + +/// Get the approximate size of a directory and its contents. +/// +/// Will skip unreadable files, and files. Not 100% accurate if files are being created and deleted +/// while this function is running. +pub fn size_of_dir(path: &Path) -> u64 { + if let Ok(iter) = fs::read_dir(path) { + iter.filter_map(std::result::Result::ok) + .map(size_of_dir_entry) + .sum() + } else { + 0 + } +} + +fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { + dir.metadata().map(|m| m.len()).unwrap_or(0) +} diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 1bed7b74b19..39d759d1dca 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -143,7 +143,7 @@ pub fn try_create_float_gauge_vec( Ok(counter_vec) } -/// Attempts to create a `IntGaugeVec`, returning `Err` if the registry does not accept the gauge +/// Attempts to create a `IntCounterVec`, returning `Err` if the registry does not accept the gauge /// (potentially due to naming conflict). pub fn try_create_int_counter_vec( name: &str, @@ -221,6 +221,12 @@ pub fn inc_counter_vec(int_counter_vec: &Result, name: &[&str]) { } } +pub fn inc_counter_vec_by(int_counter_vec: &Result, name: &[&str], amount: i64) { + if let Some(counter) = get_int_counter(int_counter_vec, name) { + counter.inc_by(amount); + } +} + /// If `histogram_vec.is_ok()`, returns a histogram with the given `name`. pub fn get_histogram(histogram_vec: &Result, name: &[&str]) -> Option { if let Ok(histogram_vec) = histogram_vec { diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index a47b303130a..bd0bb1675df 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] bincode = "1.3.1" byteorder = "1.3.4" +directory = { path = "../common/directory" } eth2_ssz = { path = "../consensus/ssz" } eth2_ssz_derive = { path = "../consensus/ssz_derive" } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 48938d87d40..6a19dbdc3f1 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,3 +1,4 @@ +use crate::metrics::{self, SLASHER_COMPRESSION_RATIO, SLASHER_NUM_CHUNKS_UPDATED}; use crate::{AttesterRecord, AttesterSlashingStatus, Config, Error, SlasherDB}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use lmdb::{RwTransaction, Transaction}; @@ -110,6 +111,8 @@ pub struct MaxTargetChunk { } pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwned { + fn name() -> &'static str; + fn empty(config: &Config) -> Self; fn chunk(&mut self) -> &mut Chunk; @@ -178,6 +181,9 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn let mut compressed_value = vec![]; encoder.read_to_end(&mut compressed_value)?; + let compression_ratio = value.len() as f64 / compressed_value.len() as f64; + metrics::set_float_gauge(&SLASHER_COMPRESSION_RATIO, compression_ratio); + txn.put( Self::select_db(db), &disk_key.to_be_bytes(), @@ -189,6 +195,10 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn } impl TargetArrayChunk for MinTargetChunk { + fn name() -> &'static str { + "min" + } + fn empty(config: &Config) -> Self { MinTargetChunk { chunk: Chunk { @@ -288,6 +298,10 @@ impl TargetArrayChunk for MinTargetChunk { } impl TargetArrayChunk for MaxTargetChunk { + fn name() -> &'static str { + "max" + } + fn empty(config: &Config) -> Self { 
MaxTargetChunk { chunk: Chunk { @@ -603,6 +617,12 @@ pub fn update_array( } // Store chunks on disk. + metrics::inc_counter_vec_by( + &SLASHER_NUM_CHUNKS_UPDATED, + &[T::name()], + updated_chunks.len() as i64, + ); + for (chunk_index, chunk) in updated_chunks { chunk.store(db, txn, validator_chunk_index, chunk_index, config)?; } diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index 920a00e2905..7f95ad8cca6 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -2,6 +2,10 @@ use lazy_static::lazy_static; pub use lighthouse_metrics::*; lazy_static! { + pub static ref SLASHER_DATABASE_SIZE: Result = try_create_int_gauge( + "slasher_database_size", + "Size of the LMDB database backing the slasher, in bytes" + ); pub static ref SLASHER_RUN_TIME: Result = try_create_histogram( "slasher_process_batch_time", "Time taken to process a batch of blocks and attestations" @@ -18,4 +22,17 @@ lazy_static! { "slasher_num_attestations_valid", "Number of valid attestations per batch" ); + pub static ref SLASHER_NUM_BLOCKS_PROCESSED: Result = try_create_int_gauge( + "slasher_num_blocks_processed", + "Number of blocks processed per batch", + ); + pub static ref SLASHER_NUM_CHUNKS_UPDATED: Result = try_create_int_counter_vec( + "slasher_num_chunks_updated", + "Number of min or max target chunks updated on disk", + &["array"], + ); + pub static ref SLASHER_COMPRESSION_RATIO: Result = try_create_float_gauge( + "slasher_compression_ratio", + "Compression ratio for min-max array chunks (higher is better)" + ); } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 7a3a5be9bcf..1589dc2bf68 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -1,6 +1,6 @@ use crate::metrics::{ self, SLASHER_NUM_ATTESTATIONS_DEFERRED, SLASHER_NUM_ATTESTATIONS_DROPPED, - SLASHER_NUM_ATTESTATIONS_VALID, + SLASHER_NUM_ATTESTATIONS_VALID, SLASHER_NUM_BLOCKS_PROCESSED, }; use crate::{ array, AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, @@ -84,6 +84,8 @@ impl Slasher { let blocks = self.block_queue.dequeue(); let mut slashings = vec![]; + metrics::set_gauge(&SLASHER_NUM_BLOCKS_PROCESSED, blocks.len() as i64); + for block in blocks { if let ProposerSlashingStatus::DoubleVote(slashing) = self.db.check_or_insert_block_proposal(txn, block)? diff --git a/slasher/src/slasher_server.rs b/slasher/src/slasher_server.rs index 4b20095f7da..b542a8023b7 100644 --- a/slasher/src/slasher_server.rs +++ b/slasher/src/slasher_server.rs @@ -1,5 +1,6 @@ -use crate::metrics::{self, SLASHER_RUN_TIME}; +use crate::metrics::{self, SLASHER_DATABASE_SIZE, SLASHER_RUN_TIME}; use crate::Slasher; +use directory::size_of_dir; use slog::{debug, error, info, trace}; use slot_clock::SlotClock; use std::sync::mpsc::{sync_channel, TrySendError}; @@ -83,6 +84,9 @@ impl SlasherServer { "num_attestations" => num_attestations, "num_blocks" => num_blocks, ); + + let database_size = size_of_dir(&slasher.config().database_path); + metrics::set_gauge(&SLASHER_DATABASE_SIZE, database_size as i64); } }, "slasher_server_process_queued",
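The labelled chunk-update counter relies on the `try_create_int_counter_vec` and `inc_counter_vec_by` helpers shown above. As a rough illustration of that API, here is a minimal sketch that assumes `lighthouse_metrics` and `lazy_static` are available as dependencies and uses a made-up metric name; it mirrors how `SLASHER_NUM_CHUNKS_UPDATED` is bumped once per batch with the `"min"` or `"max"` array label:

```
use lazy_static::lazy_static;
use lighthouse_metrics::*;

lazy_static! {
    // Counter labelled by array kind ("min" or "max"), in the style of
    // SLASHER_NUM_CHUNKS_UPDATED. The metric name here is hypothetical.
    pub static ref EXAMPLE_CHUNKS_UPDATED: Result<IntCounterVec> = try_create_int_counter_vec(
        "example_chunks_updated",
        "Number of min or max target chunks updated on disk (example metric)",
        &["array"],
    );
}

fn record_chunk_updates(array: &str, updated_chunks: usize) {
    // Add the whole batch size in one call rather than incrementing per chunk.
    inc_counter_vec_by(&EXAMPLE_CHUNKS_UPDATED, &[array], updated_chunks as i64);
}

fn main() {
    record_chunk_updates("min", 12);
    record_chunk_updates("max", 7);
}
```

Passing the batch size to `inc_counter_vec_by` avoids touching the counter once per updated chunk while still keeping separate series for the min and max target arrays.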