diff --git a/Cargo.lock b/Cargo.lock index 8044a3799fc..167f4c3d1f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1951,6 +1951,7 @@ dependencies = [ "smallvec", "snap", "strum", + "superstruct", "task_executor", "tempfile", "tiny-keccak 2.0.2", @@ -2753,6 +2754,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", + "discv5", "environment", "eth1", "eth2", @@ -3326,13 +3328,13 @@ dependencies = [ [[package]] name = "libp2p" version = "0.39.1" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "atomic", "bytes 1.0.1", "futures", "lazy_static", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", @@ -3398,7 +3400,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asn1_der", "bs58", @@ -3431,21 +3433,21 @@ dependencies = [ [[package]] name = "libp2p-deflate" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "flate2", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", ] [[package]] name = "libp2p-dns" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-std-resolver", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "smallvec", "trust-dns-resolver", @@ -3454,12 +3456,12 @@ dependencies = [ [[package]] name = "libp2p-floodsub" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "cuckoofilter", "fnv", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3471,7 +3473,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" version = "0.32.0" -source = 
"git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -3480,7 +3482,7 @@ dependencies = [ "fnv", "futures", "hex_fmt", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3496,10 +3498,10 @@ dependencies = [ [[package]] name = "libp2p-identify" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3511,7 +3513,7 @@ dependencies = [ [[package]] name = "libp2p-kad" version = "0.31.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec", @@ -3519,7 +3521,7 @@ dependencies = [ "either", "fnv", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "prost", @@ -3536,7 +3538,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.31.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-io", "data-encoding", @@ -3544,7 +3546,7 @@ dependencies = [ "futures", "if-watch", "lazy_static", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "rand 0.8.4", @@ -3556,12 +3558,12 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "nohash-hasher", "parking_lot", @@ -3573,13 +3575,13 @@ dependencies = [ [[package]] name = 
"libp2p-noise" version = "0.32.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "bytes 1.0.1", "curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "prost", "prost-build", @@ -3594,10 +3596,10 @@ dependencies = [ [[package]] name = "libp2p-ping" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "rand 0.7.3", @@ -3608,12 +3610,12 @@ dependencies = [ [[package]] name = "libp2p-plaintext" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "prost", "prost-build", @@ -3624,7 +3626,7 @@ dependencies = [ [[package]] name = "libp2p-pnet" version = "0.21.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", "log", @@ -3637,13 +3639,13 @@ dependencies = [ [[package]] name = "libp2p-relay" version = "0.3.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", "futures-timer", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "pin-project 1.0.7", @@ -3659,12 +3661,12 @@ dependencies = [ [[package]] name = "libp2p-request-response" version = "0.12.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-trait", "bytes 1.0.1", "futures", - 
"libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "libp2p-swarm", "log", "lru", @@ -3678,11 +3680,11 @@ dependencies = [ [[package]] name = "libp2p-swarm" version = "0.30.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "either", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "rand 0.7.3", "smallvec", @@ -3693,7 +3695,7 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.24.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "quote", "syn", @@ -3702,7 +3704,7 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-io", "futures", @@ -3711,7 +3713,7 @@ dependencies = [ "if-watch", "ipnet", "libc", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "socket2 0.4.0", "tokio 1.8.1", @@ -3720,22 +3722,22 @@ dependencies = [ [[package]] name = "libp2p-uds" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-std", "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", ] [[package]] name = "libp2p-wasm-ext" version = "0.29.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", "js-sys", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "parity-send-wrapper", "wasm-bindgen", "wasm-bindgen-futures", @@ -3744,12 +3746,12 @@ dependencies = [ [[package]] name = "libp2p-websocket" version = "0.30.0" -source = 
"git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "log", "quicksink", "rw-stream-sink", @@ -3761,10 +3763,10 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.33.0" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783)", + "libp2p-core 0.29.0 (git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a)", "parking_lot", "thiserror", "yamux", @@ -4253,7 +4255,7 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.10.3" -source = "git+https://github.com/sigp/rust-libp2p?rev=323cae1d08112052740834aa1fb262ae43e6f783#323cae1d08112052740834aa1fb262ae43e6f783" +source = "git+https://github.com/sigp/rust-libp2p?rev=75fd53ec5407a58ae1ff600fd1c68ea49079364a#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "bytes 1.0.1", "futures", @@ -4286,6 +4288,7 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", + "discv5", "environment", "error-chain", "eth2_libp2p", @@ -7318,6 +7321,7 @@ dependencies = [ "futures", "hex", "hyper", + "itertools 0.10.1", "lazy_static", "libc", "libsecp256k1", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index aeb30bf2e63..5c42a515cc9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -43,7 +43,7 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; -use eth2::types::{EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead}; +use eth2::types::{EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SyncDuty}; use fork_choice::ForkChoice; use futures::channel::mpsc::Sender; use itertools::process_results; @@ -1081,6 +1081,29 @@ impl BeaconChain { Ok(pubkey_cache.get_index(pubkey)) } + /// Return the validator indices of all public keys fetched from an iterator. + /// + /// If any public key doesn't belong to a known validator then an error will be returned. + /// We could consider relaxing this by returning `Vec>` in future. + pub fn validator_indices<'a>( + &self, + validator_pubkeys: impl Iterator, + ) -> Result, Error> { + let pubkey_cache = self + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + + validator_pubkeys + .map(|pubkey| { + pubkey_cache + .get_index(pubkey) + .map(|id| id as u64) + .ok_or(Error::ValidatorPubkeyUnknown(*pubkey)) + }) + .collect() + } + /// Returns the validator pubkey (if any) for the given validator index. 
/// /// ## Notes @@ -1214,6 +1237,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .get_by_slot_and_root(slot, attestation_data_root) } + /// Return an aggregated `SyncCommitteeContribution` matching the given `sync_contribution_data`. + pub fn get_aggregated_sync_committee_contribution( + &self, + sync_contribution_data: &SyncContributionData, + ) -> Option<SyncCommitteeContribution<T::EthSpec>> { + self.naive_sync_aggregation_pool + .read() + .get(sync_contribution_data) + } + /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. /// /// The produced `Attestation` will not be valid until it has been signed by exactly one @@ -1882,6 +1915,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> { Ok(()) } + /// Attempt to obtain sync committee duties from the head. + pub fn sync_committee_duties_from_head( + &self, + epoch: Epoch, + validator_indices: &[u64], + ) -> Result<Vec<Option<SyncDuty>>, Error> { + self.with_head(move |head| { + head.beacon_state + .get_sync_committee_duties(epoch, validator_indices, &self.spec) + .map_err(Error::SyncDutiesError) + }) + } + /// Attempt to verify and import a chain of blocks to `self`. /// /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., @@ -2624,6 +2670,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; let voluntary_exits = self.op_pool.get_voluntary_exits(&state, &self.spec).into(); + // Closure to fetch a sync aggregate in cases where it is required. + let get_sync_aggregate = || -> Result<SyncAggregate<T::EthSpec>, BlockProductionError> { + Ok(self + .op_pool + .get_sync_aggregate(&state) + .map_err(BlockProductionError::OpPoolError)? + .unwrap_or_else(|| { + warn!( + self.log, + "Producing block with no sync contributions"; + "slot" => state.slot(), + ); + SyncAggregate::new() + })) + }; + let inner_block = match state { BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { slot, @@ -2641,24 +2703,26 @@ voluntary_exits, }, }), - BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits, - // FIXME(altair): put a sync aggregate from the pool here (once implemented) - sync_aggregate: SyncAggregate::new(), - }, - }), + BeaconState::Altair(_) => { + let sync_aggregate = get_sync_aggregate()?; + BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations, + deposits, + voluntary_exits, + sync_aggregate, + }, + }) + } }; let block = SignedBeaconBlock::from_block( @@ -3324,14 +3388,21 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // therefore use the genesis slot. let slot = self.slot().unwrap_or(self.spec.genesis_slot); - self.spec.enr_fork_id(slot, self.genesis_validators_root) + self.spec + .enr_fork_id::<T::EthSpec>(slot, self.genesis_validators_root) } - /// Calculates the `Duration` to the next fork, if one exists. - pub fn duration_to_next_fork(&self) -> Option<Duration> { - let epoch = self.spec.next_fork_epoch()?; + /// Calculates the `Duration` to the next fork if it exists and returns it /// with its corresponding `ForkName`.
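Note: the `get_sync_aggregate` closure added above separates two cases: an op-pool failure is a hard error, while an empty pool merely logs and falls back to an empty aggregate so block production can continue. A sketch of that `Result<Option<_>>` handling under toy types (`Pool`, `PoolError` are hypothetical stand-ins):

```rust
/// Stand-in for the op pool: may fail, or may simply have nothing aggregated yet.
struct Pool;
#[derive(Debug)]
struct PoolError;
#[derive(Debug, Default)]
struct SyncAggregate; // `Default` plays the role of `SyncAggregate::new()`

impl Pool {
    fn get_sync_aggregate(&self) -> Result<Option<SyncAggregate>, PoolError> {
        Ok(None) // empty pool in this toy example
    }
}

fn sync_aggregate_or_empty(pool: &Pool) -> Result<SyncAggregate, PoolError> {
    Ok(pool
        .get_sync_aggregate()? // hard error: bubble up
        .unwrap_or_else(|| {
            eprintln!("producing block with no sync contributions");
            SyncAggregate::default() // empty-pool fallback
        }))
}
```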
+ pub fn duration_to_next_fork(&self) -> Option<(ForkName, Duration)> { + // If we are unable to read the slot clock we assume that it is prior to genesis and + // therefore use the genesis slot. + let slot = self.slot().unwrap_or(self.spec.genesis_slot); + + let (fork_name, epoch) = self.spec.next_fork_epoch::<T::EthSpec>(slot)?; self.slot_clock .duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) + .map(|duration| (fork_name, duration)) } pub fn dump_as_dot<W: Write>(&self, output: &mut W) { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index f484b194549..543f4222269 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -87,6 +87,7 @@ pub enum BeaconChainError { DuplicateValidatorPublicKey, ValidatorPubkeyCacheFileError(String), ValidatorIndexUnknown(usize), + ValidatorPubkeyUnknown(PublicKeyBytes), OpPoolError(OpPoolError), NaiveAggregationError(NaiveAggregationError), ObservedAttestationsError(ObservedAttestationsError), @@ -120,6 +121,7 @@ pub enum BeaconChainError { state_epoch: Epoch, shuffling_epoch: Epoch, }, + SyncDutiesError(BeaconStateError), InconsistentForwardsIter { request_slot: Slot, slot: Slot, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index e34205c700d..67a53b90497 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -151,7 +151,7 @@ pub fn test_spec<E: EthSpec>() -> ChainSpec { pub struct BeaconChainHarness<T: BeaconChainTypes> { pub validator_keypairs: Vec<Keypair>, - pub chain: BeaconChain<T>, + pub chain: Arc<BeaconChain<T>>, pub spec: ChainSpec, pub data_dir: TempDir, pub shutdown_receiver: Receiver, @@ -229,6 +229,29 @@ impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> { target_aggregators_per_committee: u64, store_config: StoreConfig, chain_config: ChainConfig, + ) -> Self { + Self::new_with_mutator( + eth_spec_instance, + spec, + validator_keypairs, + target_aggregators_per_committee, + store_config, + chain_config, + |x| x, + ) + } + + /// Apply a function to the beacon chain builder before building.
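Note: `new_with_mutator` (whose signature follows) threads an `impl FnOnce(Builder) -> Builder` through construction so a test can adjust the builder — e.g. attach a slasher — before `build()` is called, while plain `new` passes the identity closure `|x| x`. The shape of that API, sketched against a toy builder:

```rust
#[derive(Default)]
struct Builder {
    slasher: Option<String>, // placeholder for the real Arc<Slasher<...>>
}

impl Builder {
    fn slasher(mut self, s: String) -> Self {
        self.slasher = Some(s);
        self
    }
    fn build(self) -> String {
        format!("chain with slasher: {:?}", self.slasher)
    }
}

/// `new` would forward the identity closure; tests pass a real mutator.
fn new_with_mutator(mutator: impl FnOnce(Builder) -> Builder) -> String {
    mutator(Builder::default()).build()
}

fn demo() {
    let plain = new_with_mutator(|x| x);
    let slashed = new_with_mutator(|b| b.slasher("test-slasher".into()));
    assert_ne!(plain, slashed);
}
```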
+ pub fn new_with_mutator( + eth_spec_instance: E, + spec: Option<ChainSpec>, + validator_keypairs: Vec<Keypair>, + target_aggregators_per_committee: u64, + store_config: StoreConfig, + chain_config: ChainConfig, + mutator: impl FnOnce( + BeaconChainBuilder<EphemeralHarnessType<E>>, + ) -> BeaconChainBuilder<EphemeralHarnessType<E>>, ) -> Self { let data_dir = tempdir().expect("should create temporary data_dir"); let mut spec = spec.unwrap_or_else(test_spec::<E>); @@ -240,7 +263,7 @@ impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> { let log = test_logger(); let store = HotColdDB::open_ephemeral(store_config, spec.clone(), log.clone()).unwrap(); - let chain = BeaconChainBuilder::new(eth_spec_instance) + let builder = BeaconChainBuilder::new(eth_spec_instance) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) @@ -260,13 +283,13 @@ impl<E: EthSpec> BeaconChainHarness<EphemeralHarnessType<E>> { log.clone(), 1, ))) - .monitor_validators(true, vec![], log) - .build() - .expect("should build"); + .monitor_validators(true, vec![], log); + + let chain = mutator(builder).build().expect("should build"); Self { spec: chain.spec.clone(), - chain, + chain: Arc::new(chain), validator_keypairs, data_dir, shutdown_receiver, @@ -311,7 +334,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> { Self { spec: chain.spec.clone(), - chain, + chain: Arc::new(chain), validator_keypairs, data_dir, shutdown_receiver, @@ -353,7 +376,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> { Self { spec: chain.spec.clone(), - chain, + chain: Arc::new(chain), validator_keypairs, data_dir, shutdown_receiver, diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 446b19195ab..ccdb98a2310 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -4,7 +4,7 @@ extern crate lazy_static; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + test_logger, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult}; use slasher::{Config as SlasherConfig, Slasher}; @@ -830,17 +830,25 @@ fn block_gossip_verification() { #[test] fn verify_block_for_gossip_slashing_detection() { - let mut harness = get_harness(VALIDATOR_COUNT); - let slasher_dir = tempdir().unwrap(); let slasher = Arc::new( Slasher::open( SlasherConfig::new(slasher_dir.path().into()).for_testing(), - harness.logger().clone(), + test_logger(), ) .unwrap(), ); - harness.chain.slasher = Some(slasher.clone()); + + let harness = BeaconChainHarness::new_with_mutator( + MainnetEthSpec, + None, + KEYPAIRS.to_vec(), + 1 << 32, + StoreConfig::default(), + ChainConfig::default(), + |builder| builder.slasher(slasher.clone()), + ); + harness.advance_slot(); let state = harness.get_current_state(); let (block1, _) = harness.make_block(state.clone(), Slot::new(1)); diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index 9f3e48cfad0..3029cb03c61 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -40,12 +40,15 @@ rand = "0.7.3" directory = { path = "../../common/directory" } regex = "1.3.9" strum = { version = "0.20", features = ["derive"] } +superstruct = "0.2.0" [dependencies.libp2p] #version = "0.39.1" #default-features = false +# TODO: Update once https://github.com/libp2p/rust-libp2p/pull/2103 and +# https://github.com/libp2p/rust-libp2p/pull/2137 are merged upstream.
git = "https://github.com/sigp/rust-libp2p" -rev = "323cae1d08112052740834aa1fb262ae43e6f783" +rev = "75fd53ec5407a58ae1ff600fd1c68ea49079364a" features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] [dev-dependencies] diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index c509da6a312..83161c6704b 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -1,6 +1,7 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; +use crate::config::gossipsub_config; use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; use crate::peer_manager::{ score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -8,7 +9,7 @@ use crate::peer_manager::{ use crate::rpc::*; use crate::service::METADATA_FILENAME; use crate::types::{ - subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; @@ -42,7 +43,10 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use types::{ChainSpec, EnrForkId, EthSpec, SignedBeaconBlock, Slot, SubnetId}; +use types::{ + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, ChainSpec, EnrForkId, EthSpec, ForkContext, + SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, +}; pub mod gossipsub_scoring_parameters; @@ -157,6 +161,8 @@ pub struct Behaviour { /// Directory where metadata is stored. #[behaviour(ignore)] network_dir: PathBuf, + #[behaviour(ignore)] + fork_context: Arc, /// Gossipsub score parameters. #[behaviour(ignore)] score_settings: PeerScoreSettings, @@ -172,9 +178,10 @@ pub struct Behaviour { impl Behaviour { pub async fn new( local_key: &Keypair, - config: &NetworkConfig, + mut config: NetworkConfig, network_globals: Arc>, log: &slog::Logger, + fork_context: Arc, chain_spec: &ChainSpec, ) -> error::Result { let behaviour_log = log.new(o!()); @@ -191,7 +198,8 @@ impl Behaviour { }; // Build and start the discovery sub-behaviour - let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?; + let mut discovery = + Discovery::new(local_key, &config, network_globals.clone(), log).await?; // start searching for peers discovery.discover_peers(); @@ -201,13 +209,19 @@ impl Behaviour { .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = vec![enr_fork_id.fork_digest]; + let possible_fork_digests = fork_context.all_fork_digests(); let filter = MaxCountSubscriptionFilter { - filter: Self::create_whitelist_filter(possible_fork_digests, 64), //TODO change this to a constant + filter: Self::create_whitelist_filter( + possible_fork_digests, + chain_spec.attestation_subnet_count, + SYNC_COMMITTEE_SUBNET_COUNT, + ), max_subscribed_topics: 200, //TODO change this to a constant max_subscriptions_per_request: 100, //this is according to the current go implementation }; + config.gs_config = gossipsub_config(fork_context.clone()); + // Build and configure the Gossipsub behaviour let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( @@ -247,11 +261,11 @@ impl Behaviour { Ok(Behaviour { // Sub-behaviours gossipsub, - eth2_rpc: RPC::new(log.clone()), + eth2_rpc: RPC::new(fork_context.clone(), log.clone()), discovery, identify: 
Identify::new(identify_config), // Auxiliary fields - peer_manager: PeerManager::new(config, network_globals.clone(), log).await?, + peer_manager: PeerManager::new(&config, network_globals.clone(), log).await?, events: VecDeque::new(), internal_events: VecDeque::new(), network_globals, @@ -260,6 +274,7 @@ network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, + fork_context, update_gossipsub_scores, }) } @@ -311,28 +326,20 @@ impl<TSpec: EthSpec> Behaviour<TSpec> { self.unsubscribe(gossip_topic) } - /// Subscribes to a specific subnet id; - pub fn subscribe_to_subnet(&mut self, subnet_id: SubnetId) -> bool { - let topic = GossipTopic::new( - subnet_id.into(), - GossipEncoding::default(), - self.enr_fork_id.fork_digest, - ); - self.subscribe(topic) - } - - /// Un-Subscribes from a specific subnet id; - pub fn unsubscribe_from_subnet(&mut self, subnet_id: SubnetId) -> bool { - let topic = GossipTopic::new( - subnet_id.into(), - GossipEncoding::default(), - self.enr_fork_id.fork_digest, - ); - self.unsubscribe(topic) + /// Unsubscribe from all topics that don't have the given fork_digest + pub fn unsubscribe_from_fork_topics_except(&mut self, except: [u8; 4]) { + let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); + for topic in subscriptions + .iter() + .filter(|topic| topic.fork_digest != except) + .cloned() + { + self.unsubscribe(topic); + } } /// Subscribes to a gossipsub topic. - fn subscribe(&mut self, topic: GossipTopic) -> bool { + pub fn subscribe(&mut self, topic: GossipTopic) -> bool { // update the network globals self.network_globals .gossipsub_subscriptions @@ -354,7 +361,7 @@ } /// Unsubscribe from a gossipsub topic. - fn unsubscribe(&mut self, topic: GossipTopic) -> bool { + pub fn unsubscribe(&mut self, topic: GossipTopic) -> bool { // update the network globals self.network_globals .gossipsub_subscriptions @@ -537,15 +544,15 @@ impl<TSpec: EthSpec> Behaviour<TSpec> { self.discovery.add_enr(enr); } - /// Updates a subnet value to the ENR bitfield. + /// Updates a subnet value in the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. - pub fn update_enr_subnet(&mut self, subnet_id: SubnetId, value: bool) { + pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS - self.update_metadata(); + self.update_metadata_bitfields(); } /// Attempts to discover new peers for a given subnet.
The `min_ttl` gives the time at which we @@ -564,20 +571,24 @@ impl Behaviour { self.network_globals .peers .write() - .extend_peers_on_subnet(s.subnet_id, min_ttl); + .extend_peers_on_subnet(&s.subnet, min_ttl); + if let Subnet::SyncCommittee(sync_subnet) = s.subnet { + self.peer_manager_mut() + .add_sync_subnet(sync_subnet, min_ttl); + } } // Already have target number of peers, no need for subnet discovery let peers_on_subnet = self .network_globals .peers .read() - .good_peers_on_subnet(s.subnet_id) + .good_peers_on_subnet(s.subnet) .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { trace!( self.log, "Discovery query ignored"; - "subnet_id" => ?s.subnet_id, + "subnet" => ?s.subnet, "reason" => "Already connected to desired peers", "connected_peers_on_subnet" => peers_on_subnet, "target_subnet_peers" => TARGET_SUBNET_PEERS, @@ -587,7 +598,7 @@ impl Behaviour { // If we connect to the cached peers before the discovery query starts, then we potentially // save a costly discovery query. } else { - self.dial_cached_enrs_in_subnet(s.subnet_id); + self.dial_cached_enrs_in_subnet(s.subnet); true } }) @@ -603,26 +614,6 @@ impl Behaviour { pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { self.discovery.update_eth2_enr(enr_fork_id.clone()); - // unsubscribe from all gossip topics and re-subscribe to their new fork counterparts - let subscribed_topics = self - .network_globals - .gossipsub_subscriptions - .read() - .iter() - .cloned() - .collect::>(); - - // unsubscribe from all topics - for topic in &subscribed_topics { - self.unsubscribe(topic.clone()); - } - - // re-subscribe modifying the fork version - for mut topic in subscribed_topics { - *topic.digest() = enr_fork_id.fork_digest; - self.subscribe(topic); - } - // update the local reference self.enr_fork_id = enr_fork_id; } @@ -630,18 +621,28 @@ impl Behaviour { /* Private internal functions */ /// Updates the current meta data of the node to match the local ENR. - fn update_metadata(&mut self) { + fn update_metadata_bitfields(&mut self) { let local_attnets = self .discovery .local_enr() - .bitfield::() - .expect("Local discovery must have bitfield"); + .attestation_bitfield::() + .expect("Local discovery must have attestation bitfield"); + + let local_syncnets = self + .discovery + .local_enr() + .sync_committee_bitfield::() + .expect("Local discovery must have sync committee bitfield"); { // write lock scope let mut meta_data = self.network_globals.local_metadata.write(); - meta_data.seq_number += 1; - meta_data.attnets = local_attnets; + + *meta_data.seq_number_mut() += 1; + *meta_data.attnets_mut() = local_attnets; + if let Ok(syncnets) = meta_data.syncnets_mut() { + *syncnets = local_syncnets; + } } // Save the updated metadata to disk save_metadata_to_disk( @@ -654,7 +655,7 @@ impl Behaviour { /// Sends a Ping request to the peer. fn ping(&mut self, id: RequestId, peer_id: PeerId) { let ping = crate::rpc::Ping { - data: self.network_globals.local_metadata.read().seq_number, + data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Ping"; "request_id" => id, "peer_id" => %peer_id); @@ -665,7 +666,7 @@ impl Behaviour { /// Sends a Pong response to the peer. 
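Note: the `seq_number_mut()`/`syncnets_mut()` calls above are accessors on the new versioned `MetaData` (generated with `superstruct`: V1 pre-Altair, V2 adds `syncnets`). A hand-rolled sketch of the accessor shape — shared fields are infallible, V2-only fields return `Result` so V1 metadata simply skips the update:

```rust
struct Bitfield(u64); // stand-in for the SSZ BitVector

struct MetaDataV1 { seq_number: u64, attnets: Bitfield }
struct MetaDataV2 { seq_number: u64, attnets: Bitfield, syncnets: Bitfield }

enum MetaData { V1(MetaDataV1), V2(MetaDataV2) }

impl MetaData {
    /// Present in every variant: infallible accessor.
    fn seq_number_mut(&mut self) -> &mut u64 {
        match self {
            MetaData::V1(m) => &mut m.seq_number,
            MetaData::V2(m) => &mut m.seq_number,
        }
    }
    /// Only present from V2 onwards: fallible accessor.
    fn syncnets_mut(&mut self) -> Result<&mut Bitfield, ()> {
        match self {
            MetaData::V1(_) => Err(()),
            MetaData::V2(m) => Ok(&mut m.syncnets),
        }
    }
}
```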
fn pong(&mut self, id: PeerRequestId, peer_id: PeerId) { let ping = crate::rpc::Ping { - data: self.network_globals.local_metadata.read().seq_number, + data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); @@ -724,8 +725,8 @@ impl Behaviour { /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't /// in Connected, Dialing or Banned state. - fn dial_cached_enrs_in_subnet(&mut self, subnet_id: SubnetId) { - let predicate = subnet_predicate::(vec![subnet_id], &self.log); + fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { + let predicate = subnet_predicate::(vec![subnet], &self.log); let peers_to_dial: Vec = self .discovery .cached_enrs() @@ -752,6 +753,7 @@ impl Behaviour { fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, attestation_subnet_count: u64, + sync_committee_subnet_count: u64, ) -> WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -767,9 +769,13 @@ impl Behaviour { add(VoluntaryExit); add(ProposerSlashing); add(AttesterSlashing); + add(SignedContributionAndProof); for id in 0..attestation_subnet_count { add(Attestation(SubnetId::new(id))); } + for id in 0..sync_committee_subnet_count { + add(SyncCommitteeMessage(SyncSubnetId::new(id))); + } } WhitelistSubscriptionFilter(possible_hashes) } @@ -792,9 +798,9 @@ impl NetworkBehaviourEventProcess for Behaviour< } => { // Note: We are keeping track here of the peer that sent us the message, not the // peer that originally published the message. - match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data) { + match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data, &self.fork_context) { Err(e) => { - debug!(self.log, "Could not decode gossipsub message"; "error" => e); + debug!(self.log, "Could not decode gossipsub message"; "topic" => ?gs_msg.topic,"error" => e); //reject the message if let Err(e) = self.gossipsub.report_message_validation_result( &id, @@ -816,12 +822,12 @@ impl NetworkBehaviourEventProcess for Behaviour< } } GossipsubEvent::Subscribed { peer_id, topic } => { - if let Some(subnet_id) = subnet_id_from_topic_hash(&topic) { + if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.peer_manager.add_subscription(&peer_id, subnet_id); } } GossipsubEvent::Unsubscribed { peer_id, topic } => { - if let Some(subnet_id) = subnet_id_from_topic_hash(&topic) { + if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.peer_manager.remove_subscription(&peer_id, subnet_id); } } @@ -1089,6 +1095,10 @@ impl Behaviour { // Peer manager has requested a discovery query for more peers. self.discovery.discover_peers(); } + PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { + // Peer manager has requested a subnet discovery query for more peers. 
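Note: `create_whitelist_filter` above enumerates every topic the node may legitimately see: the cross product of all known fork digests with the static topics, plus one topic per attestation subnet and one per sync committee subnet. A sketch of that enumeration over plain topic strings in the `/eth2/<digest>/<name>/ssz_snappy` convention (the real code hashes `GossipTopic`s; names here are illustrative):

```rust
use std::collections::HashSet;

/// Build the set of allowed topic strings across all fork digests.
fn whitelist(fork_digests: &[[u8; 4]], att_subnets: u64, sync_subnets: u64) -> HashSet<String> {
    let mut allowed = HashSet::new();
    for digest in fork_digests {
        let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
        // Static topics (subset shown).
        for kind in ["beacon_block", "voluntary_exit", "sync_committee_contribution_and_proof"] {
            allowed.insert(format!("/eth2/{}/{}/ssz_snappy", hex, kind));
        }
        // One topic per attestation subnet, one per sync committee subnet.
        for id in 0..att_subnets {
            allowed.insert(format!("/eth2/{}/beacon_attestation_{}/ssz_snappy", hex, id));
        }
        for id in 0..sync_subnets {
            allowed.insert(format!("/eth2/{}/sync_committee_{}/ssz_snappy", hex, id));
        }
    }
    allowed
}
```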
+ self.discover_subnet_peers(subnets_to_discover); + } PeerManagerEvent::Ping(peer_id) => { // send a ping request to this peer self.ping(RequestId::Behaviour, peer_id); diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index 5add5fdf9aa..3face7b9858 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -12,7 +12,9 @@ use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; +use types::{ForkContext, ForkName}; /// The maximum transmit size of gossip messages in bytes. pub const GOSSIP_MAX_SIZE: usize = 1_048_576; @@ -109,47 +111,9 @@ impl Default for Config { .join(DEFAULT_BEACON_NODE_DIR) .join(DEFAULT_NETWORK_DIR); - // The function used to generate a gossipsub message id - // We use the first 8 bytes of SHA256(data) for content addressing - let fast_gossip_message_id = |message: &RawGossipsubMessage| { - FastMessageId::from(&Sha256::digest(&message.data)[..8]) - }; - - fn prefix(prefix: [u8; 4], data: &[u8]) -> Vec { - let mut vec = Vec::with_capacity(prefix.len() + data.len()); - vec.extend_from_slice(&prefix); - vec.extend_from_slice(data); - vec - } - - let gossip_message_id = |message: &GossipsubMessage| { - MessageId::from( - &Sha256::digest(prefix(MESSAGE_DOMAIN_VALID_SNAPPY, &message.data).as_slice()) - [..20], - ) - }; - - // gossipsub configuration - // Note: The topics by default are sent as plain strings. Hashes are an optional - // parameter. + // Note: Using the default config here. Use `gossipsub_config` function for getting + // Lighthouse specific configuration for gossipsub. let gs_config = GossipsubConfigBuilder::default() - .max_transmit_size(GOSSIP_MAX_SIZE) - .heartbeat_interval(Duration::from_millis(700)) - .mesh_n(8) - .mesh_n_low(MESH_N_LOW) - .mesh_n_high(12) - .gossip_lazy(6) - .fanout_ttl(Duration::from_secs(60)) - .history_length(12) - .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large - .history_gossip(3) - .validate_messages() // require validation before propagation - .validation_mode(ValidationMode::Anonymous) - // prevent duplicates for 550 heartbeats(700millis * 550) = 385 secs - .duplicate_cache_time(Duration::from_secs(385)) - .message_id_fn(gossip_message_id) - .fast_message_id_fn(fast_gossip_message_id) - .allow_self_origin(true) .build() .expect("valid gossipsub configuration"); @@ -209,3 +173,65 @@ impl Default for Config { } } } + +/// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
+pub fn gossipsub_config(fork_context: Arc<ForkContext>) -> GossipsubConfig { + // The function used to generate a gossipsub message id + // We use the first 8 bytes of SHA256(data) for content addressing + let fast_gossip_message_id = + |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]); + fn prefix( + prefix: [u8; 4], + message: &GossipsubMessage, + fork_context: Arc<ForkContext>, + ) -> Vec<u8> { + let topic_bytes = message.topic.as_str().as_bytes(); + match fork_context.current_fork() { + ForkName::Altair => { + let topic_len_bytes = topic_bytes.len().to_le_bytes(); + let mut vec = Vec::with_capacity( + prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), + ); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&topic_len_bytes); + vec.extend_from_slice(topic_bytes); + vec.extend_from_slice(&message.data); + vec + } + ForkName::Base => { + let mut vec = Vec::with_capacity(prefix.len() + message.data.len()); + vec.extend_from_slice(&prefix); + vec.extend_from_slice(&message.data); + vec + } + } + } + let gossip_message_id = move |message: &GossipsubMessage| { + MessageId::from( + &Sha256::digest( + prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), + )[..20], + ) + }; + GossipsubConfigBuilder::default() + .max_transmit_size(GOSSIP_MAX_SIZE) + .heartbeat_interval(Duration::from_millis(700)) + .mesh_n(8) + .mesh_n_low(MESH_N_LOW) + .mesh_n_high(12) + .gossip_lazy(6) + .fanout_ttl(Duration::from_secs(60)) + .history_length(12) + .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large + .history_gossip(3) + .validate_messages() // require validation before propagation + .validation_mode(ValidationMode::Anonymous) + // prevent duplicates for 550 heartbeats(700millis * 550) = 385 secs + .duplicate_cache_time(Duration::from_secs(385)) + .message_id_fn(gossip_message_id) + .fast_message_id_fn(fast_gossip_message_id) + .allow_self_origin(true) + .build() + .expect("valid gossipsub configuration") +} diff --git a/beacon_node/eth2_libp2p/src/discovery/enr.rs b/beacon_node/eth2_libp2p/src/discovery/enr.rs index a8f05863626..3f2ae759b79 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr.rs @@ -4,7 +4,7 @@ pub use discv5::enr::{self, CombinedKey, EnrBuilder}; use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; -use crate::types::{Enr, EnrBitfield}; +use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; use discv5::enr::EnrKey; use libp2p::core::identity::Keypair; @@ -19,25 +19,47 @@ use types::{EnrForkId, EthSpec}; /// The ENR field specifying the fork id. pub const ETH2_ENR_KEY: &str = "eth2"; -/// The ENR field specifying the subnet bitfield. -pub const BITFIELD_ENR_KEY: &str = "attnets"; +/// The ENR field specifying the attestation subnet bitfield. +pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; +/// The ENR field specifying the sync committee subnet bitfield. +pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { - /// The subnet bitfield associated with the ENR. - fn bitfield<TSpec: EthSpec>(&self) -> Result<EnrBitfield<TSpec>, &'static str>; + /// The attestation subnet bitfield associated with the ENR. + fn attestation_bitfield<TSpec: EthSpec>( + &self, + ) -> Result<EnrAttestationBitfield<TSpec>, &'static str>; + + /// The sync committee subnet bitfield associated with the ENR.
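Note: the fork-dependent part of `gossipsub_config` is the message-id preimage. Pre-Altair it is `domain || data`; from Altair it becomes `domain || topic_len || topic || data`, so an identical payload published on two topics no longer collapses to one message id. Both preimages in isolation, using the `sha2` crate as the diff does (assuming the spec value `[1, 0, 0, 0]` for `MESSAGE_DOMAIN_VALID_SNAPPY` and an explicit little-endian `u64` length):

```rust
use sha2::{Digest, Sha256};

const DOMAIN: [u8; 4] = [1, 0, 0, 0]; // assumed MESSAGE_DOMAIN_VALID_SNAPPY

/// Pre-Altair: id = SHA256(domain || data)[..20]
fn message_id_base(data: &[u8]) -> Vec<u8> {
    let mut pre = DOMAIN.to_vec();
    pre.extend_from_slice(data);
    Sha256::digest(&pre)[..20].to_vec()
}

/// Altair: id = SHA256(domain || topic_len_le || topic || data)[..20]
fn message_id_altair(topic: &str, data: &[u8]) -> Vec<u8> {
    let mut pre = DOMAIN.to_vec();
    pre.extend_from_slice(&(topic.len() as u64).to_le_bytes());
    pre.extend_from_slice(topic.as_bytes());
    pre.extend_from_slice(data);
    Sha256::digest(&pre)[..20].to_vec()
}
```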
+ fn sync_committee_bitfield( + &self, + ) -> Result, &'static str>; fn eth2(&self) -> Result; } impl Eth2Enr for Enr { - fn bitfield(&self) -> Result, &'static str> { + fn attestation_bitfield( + &self, + ) -> Result, &'static str> { let bitfield_bytes = self - .get(BITFIELD_ENR_KEY) - .ok_or("ENR bitfield non-existent")?; + .get(ATTESTATION_BITFIELD_ENR_KEY) + .ok_or("ENR attestation bitfield non-existent")?; BitVector::::from_ssz_bytes(bitfield_bytes) - .map_err(|_| "Could not decode the ENR SSZ bitfield") + .map_err(|_| "Could not decode the ENR attnets bitfield") + } + + fn sync_committee_bitfield( + &self, + ) -> Result, &'static str> { + let bitfield_bytes = self + .get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + .ok_or("ENR sync committee bitfield non-existent")?; + + BitVector::::from_ssz_bytes(bitfield_bytes) + .map_err(|_| "Could not decode the ENR syncnets bitfield") } fn eth2(&self) -> Result { @@ -151,7 +173,12 @@ pub fn build_enr( // set the "attnets" field on our ENR let bitfield = BitVector::::new(); - builder.add_value(BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + + // set the "syncnets" field on our ENR + let bitfield = BitVector::::new(); + + builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); builder .build(enr_key) @@ -169,9 +196,10 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp()) - // we need the BITFIELD_ENR_KEY key to match, otherwise we use a new ENR. This will likely only - // be true for non-validating nodes - && local_enr.get(BITFIELD_ENR_KEY) == disk_enr.get(BITFIELD_ENR_KEY) + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, + // otherwise we use a new ENR. 
This will likely only be true for non-validating nodes + && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) + && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index a5159eff2f7..3866e4d4798 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -8,31 +8,28 @@ pub mod enr_ext; // Allow external use of the lighthouse ENR builder use crate::{config, metrics}; -use crate::{error, Enr, NetworkConfig, NetworkGlobals, SubnetDiscovery}; +use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr, }; -use enr::{BITFIELD_ENR_KEY, ETH2_ENR_KEY}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; +pub use libp2p::core::identity::{Keypair, PublicKey}; + +use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; pub use libp2p::{ - core::{ - connection::ConnectionId, - identity::{Keypair, PublicKey}, - ConnectedPoint, Multiaddr, PeerId, - }, + core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, swarm::{ protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler, PollParameters, SubstreamProtocol, }, }; use lru::LruCache; -use slog::{crit, debug, error, info, warn}; -use ssz::{Decode, Encode}; -use ssz_types::BitVector; +use slog::{crit, debug, error, info, trace, warn}; +use ssz::Encode; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -43,7 +40,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::mpsc; -use types::{EnrForkId, EthSpec, SubnetId}; +use types::{EnrForkId, EthSpec}; mod subnet_predicate; pub use subnet_predicate::subnet_predicate; @@ -77,13 +74,26 @@ pub enum DiscoveryEvent { SocketUpdated(SocketAddr), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] struct SubnetQuery { - subnet_id: SubnetId, + subnet: Subnet, min_ttl: Option, retries: usize, } +impl std::fmt::Debug for SubnetQuery { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let min_ttl_secs = self + .min_ttl + .map(|ttl| ttl.saturating_duration_since(Instant::now()).as_secs()); + f.debug_struct("SubnetQuery") + .field("subnet", &self.subnet) + .field("min_ttl_secs", &min_ttl_secs) + .field("retries", &self.retries) + .finish() + } +} + #[derive(Debug, Clone, PartialEq)] enum QueryType { /// We are searching for subnet peers. 
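Note: both bitfields travel inside the ENR as SSZ-encoded `BitVector`s under the `attnets`/`syncnets` keys. A round-trip sketch with the `ssz`/`ssz_types` crates — a plain `HashMap` stands in for the real ENR record:

```rust
use ssz::{Decode, Encode};
use ssz_types::{typenum::U4, BitVector};
use std::collections::HashMap;

fn demo() -> Result<(), &'static str> {
    // "syncnets": 4 subnets in this toy example.
    let mut bitfield = BitVector::<U4>::new();
    bitfield.set(1, true).map_err(|_| "out of bounds")?;

    // Write: SSZ-encode and store under the ENR key.
    let mut enr: HashMap<&str, Vec<u8>> = HashMap::new();
    enr.insert("syncnets", bitfield.as_ssz_bytes());

    // Read: fetch and decode, as `sync_committee_bitfield()` does.
    let bytes = enr
        .get("syncnets")
        .ok_or("ENR sync committee bitfield non-existent")?;
    let decoded = BitVector::<U4>::from_ssz_bytes(bytes)
        .map_err(|_| "Could not decode the ENR syncnets bitfield")?;
    assert!(decoded.get(1).unwrap_or(false));
    Ok(())
}
```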
@@ -335,13 +345,13 @@ impl Discovery { if !self.started { return; } - debug!( + trace!( self.log, "Making discovery query for subnets"; - "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet_id).collect::>() + "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::>() ); for subnet in subnets_to_discover { - self.add_subnet_query(subnet.subnet_id, subnet.min_ttl, 0); + self.add_subnet_query(subnet.subnet, subnet.min_ttl, 0); } } @@ -426,41 +436,83 @@ impl Discovery { Ok(()) } - /// Adds/Removes a subnet from the ENR Bitfield - pub fn update_enr_bitfield(&mut self, subnet_id: SubnetId, value: bool) -> Result<(), String> { - let id = *subnet_id as usize; - + /// Adds/Removes a subnet from the ENR attnets/syncnets Bitfield + pub fn update_enr_bitfield(&mut self, subnet: Subnet, value: bool) -> Result<(), String> { let local_enr = self.discv5.local_enr(); - let mut current_bitfield = local_enr.bitfield::()?; - - if id >= current_bitfield.len() { - return Err(format!( - "Subnet id: {} is outside the ENR bitfield length: {}", - id, - current_bitfield.len() - )); - } - if current_bitfield - .get(id) - .map_err(|_| String::from("Subnet ID out of bounds"))? - == value - { - return Err(format!( - "Subnet id: {} already in the local ENR already has value: {}", - id, value - )); - } + match subnet { + Subnet::Attestation(id) => { + let id = *id as usize; + let mut current_bitfield = local_enr.attestation_bitfield::()?; + if id >= current_bitfield.len() { + return Err(format!( + "Subnet id: {} is outside the ENR bitfield length: {}", + id, + current_bitfield.len() + )); + } - // set the subnet bitfield in the ENR - current_bitfield - .set(id, value) - .map_err(|_| String::from("Subnet ID out of bounds, could not set subnet ID"))?; + if current_bitfield + .get(id) + .map_err(|_| String::from("Subnet ID out of bounds"))? + == value + { + return Err(format!( + "Subnet id: {} already in the local ENR already has value: {}", + id, value + )); + } - // insert the bitfield into the ENR record - self.discv5 - .enr_insert(BITFIELD_ENR_KEY, ¤t_bitfield.as_ssz_bytes()) - .map_err(|e| format!("{:?}", e))?; + // set the subnet bitfield in the ENR + current_bitfield.set(id, value).map_err(|_| { + String::from("Subnet ID out of bounds, could not set subnet ID") + })?; + + // insert the bitfield into the ENR record + self.discv5 + .enr_insert( + ATTESTATION_BITFIELD_ENR_KEY, + ¤t_bitfield.as_ssz_bytes(), + ) + .map_err(|e| format!("{:?}", e))?; + } + Subnet::SyncCommittee(id) => { + let id = *id as usize; + let mut current_bitfield = local_enr.sync_committee_bitfield::()?; + + if id >= current_bitfield.len() { + return Err(format!( + "Subnet id: {} is outside the ENR bitfield length: {}", + id, + current_bitfield.len() + )); + } + + if current_bitfield + .get(id) + .map_err(|_| String::from("Subnet ID out of bounds"))? + == value + { + return Err(format!( + "Subnet id: {} already in the local ENR already has value: {}", + id, value + )); + } + + // set the subnet bitfield in the ENR + current_bitfield.set(id, value).map_err(|_| { + String::from("Subnet ID out of bounds, could not set subnet ID") + })?; + + // insert the bitfield into the ENR record + self.discv5 + .enr_insert( + SYNC_COMMITTEE_BITFIELD_ENR_KEY, + ¤t_bitfield.as_ssz_bytes(), + ) + .map_err(|e| format!("{:?}", e))?; + } + } // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); @@ -547,7 +599,7 @@ impl Discovery { /// Adds a subnet query if one doesn't exist. 
If a subnet query already exists, this /// updates the min_ttl field. - fn add_subnet_query(&mut self, subnet_id: SubnetId, min_ttl: Option, retries: usize) { + fn add_subnet_query(&mut self, subnet: Subnet, min_ttl: Option, retries: usize) { // remove the entry and complete the query if greater than the maximum search count if retries > MAX_DISCOVERY_RETRY { debug!( @@ -562,7 +614,7 @@ impl Discovery { let mut found = false; for query in self.queued_queries.iter_mut() { if let QueryType::Subnet(ref mut subnet_query) = query { - if subnet_query.subnet_id == subnet_id { + if subnet_query.subnet == subnet { if subnet_query.min_ttl < min_ttl { subnet_query.min_ttl = min_ttl; } @@ -577,12 +629,12 @@ impl Discovery { if !found { // Set up the query and add it to the queue let query = QueryType::Subnet(SubnetQuery { - subnet_id, + subnet, min_ttl, retries, }); // update the metrics and insert into the queue. - debug!(self.log, "Queuing subnet query"; "subnet" => *subnet_id, "retries" => retries); + trace!(self.log, "Queuing subnet query"; "subnet" => ?subnet, "retries" => retries); self.queued_queries.push_back(query); metrics::set_gauge(&metrics::DISCOVERY_QUEUE, self.queued_queries.len() as i64); } @@ -636,11 +688,6 @@ impl Discovery { // This query is for searching for peers of a particular subnet // Drain subnet_queries so we can re-use it as we continue to process the queue let grouped_queries: Vec = subnet_queries.drain(..).collect(); - debug!( - self.log, - "Starting grouped subnet query"; - "subnets" => ?grouped_queries.iter().map(|q| q.subnet_id).collect::>(), - ); self.start_subnet_query(grouped_queries); processed = true; } @@ -661,7 +708,7 @@ impl Discovery { /// Runs a discovery request for a given group of subnets. fn start_subnet_query(&mut self, subnet_queries: Vec) { - let mut filtered_subnet_ids: Vec = Vec::new(); + let mut filtered_subnets: Vec = Vec::new(); // find subnet queries that are still necessary let filtered_subnet_queries: Vec = subnet_queries @@ -672,7 +719,7 @@ impl Discovery { .network_globals .peers .read() - .good_peers_on_subnet(subnet_query.subnet_id) + .good_peers_on_subnet(subnet_query.subnet) .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { @@ -685,16 +732,13 @@ impl Discovery { } let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; - debug!(self.log, "Discovery query started for subnet"; - "subnet_id" => *subnet_query.subnet_id, + trace!(self.log, "Discovery query started for subnet"; + "subnet_query" => ?subnet_query, "connected_peers_on_subnet" => peers_on_subnet, - "target_subnet_peers" => TARGET_SUBNET_PEERS, "peers_to_find" => target_peers, - "attempt" => subnet_query.retries, - "min_ttl" => ?subnet_query.min_ttl, ); - filtered_subnet_ids.push(subnet_query.subnet_id); + filtered_subnets.push(subnet_query.subnet); true }) .collect(); @@ -702,8 +746,13 @@ impl Discovery { // Only start a discovery query if we have a subnet to look for. 
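Note: `add_subnet_query` deduplicates by subnet rather than queueing blindly: retries past a bound abandon the query, an existing entry just keeps the later `min_ttl`, and only unseen subnets are pushed. The queue update in isolation (`u64` stands in for the `Subnet` enum, and the `MAX_DISCOVERY_RETRY` value is illustrative):

```rust
use std::collections::VecDeque;
use std::time::Instant;

#[derive(PartialEq, Clone, Copy, Debug)]
struct SubnetQuery {
    subnet: u64, // stand-in for the Subnet enum
    min_ttl: Option<Instant>,
    retries: usize,
}

/// Update an existing query for this subnet, or push a new one.
fn add_subnet_query(
    queue: &mut VecDeque<SubnetQuery>,
    subnet: u64,
    min_ttl: Option<Instant>,
    retries: usize,
) {
    const MAX_DISCOVERY_RETRY: usize = 3; // illustrative bound
    if retries > MAX_DISCOVERY_RETRY {
        return; // give up: too many failed attempts
    }
    if let Some(q) = queue.iter_mut().find(|q| q.subnet == subnet) {
        if q.min_ttl < min_ttl {
            q.min_ttl = min_ttl; // keep the later expiry
        }
    } else {
        queue.push_back(SubnetQuery { subnet, min_ttl, retries });
    }
}
```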
if !filtered_subnet_queries.is_empty() { // build the subnet predicate as a combination of the eth2_fork_predicate and the subnet predicate - let subnet_predicate = subnet_predicate::(filtered_subnet_ids, &self.log); + let subnet_predicate = subnet_predicate::(filtered_subnets, &self.log); + debug!( + self.log, + "Starting grouped subnet query"; + "subnets" => ?filtered_subnet_queries, + ); self.start_query( GroupedQueryType::Subnet(filtered_subnet_queries), TARGET_PEERS_FOR_GROUPED_QUERY, @@ -798,17 +847,13 @@ impl Discovery { } } GroupedQueryType::Subnet(queries) => { - let subnets_searched_for: Vec = - queries.iter().map(|query| query.subnet_id).collect(); + let subnets_searched_for: Vec = + queries.iter().map(|query| query.subnet).collect(); match query_result.1 { Ok(r) if r.is_empty() => { debug!(self.log, "Grouped subnet discovery query yielded no results."; "subnets_searched_for" => ?subnets_searched_for); queries.iter().for_each(|query| { - self.add_subnet_query( - query.subnet_id, - query.min_ttl, - query.retries + 1, - ); + self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); }) } Ok(r) => { @@ -824,15 +869,11 @@ impl Discovery { // Map each subnet query's min_ttl to the set of ENR's returned for that subnet. queries.iter().for_each(|query| { // A subnet query has completed. Add back to the queue, incrementing retries. - self.add_subnet_query( - query.subnet_id, - query.min_ttl, - query.retries + 1, - ); + self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1); // Check the specific subnet against the enr let subnet_predicate = - subnet_predicate::(vec![query.subnet_id], &self.log); + subnet_predicate::(vec![query.subnet], &self.log); r.iter() .filter(|enr| subnet_predicate(enr)) @@ -1037,11 +1078,11 @@ impl NetworkBehaviour for Discovery { #[cfg(test)] mod tests { use super::*; - use crate::rpc::methods::MetaData; + use crate::rpc::methods::{MetaData, MetaDataV2}; use enr::EnrBuilder; use slog::{o, Drain}; use std::net::UdpSocket; - use types::MinimalEthSpec; + use types::{BitVector, MinimalEthSpec, SubnetId}; type E = MinimalEthSpec; @@ -1076,10 +1117,11 @@ mod tests { enr, 9000, 9000, - MetaData { + MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), - }, + syncnets: Default::default(), + }), vec![], &log, ); @@ -1093,12 +1135,12 @@ mod tests { let mut discovery = build_discovery().await; let now = Instant::now(); let mut subnet_query = SubnetQuery { - subnet_id: SubnetId::new(1), + subnet: Subnet::Attestation(SubnetId::new(1)), min_ttl: Some(now), retries: 0, }; discovery.add_subnet_query( - subnet_query.subnet_id, + subnet_query.subnet, subnet_query.min_ttl, subnet_query.retries, ); @@ -1109,7 +1151,7 @@ mod tests { // New query should replace old query subnet_query.min_ttl = Some(now + Duration::from_secs(1)); - discovery.add_subnet_query(subnet_query.subnet_id, subnet_query.min_ttl, 1); + discovery.add_subnet_query(subnet_query.subnet, subnet_query.min_ttl, 1); subnet_query.retries += 1; @@ -1122,7 +1164,7 @@ mod tests { // Retries > MAX_DISCOVERY_RETRY must return immediately without adding // anything. 
discovery.add_subnet_query( - subnet_query.subnet_id, + subnet_query.subnet, subnet_query.min_ttl, MAX_DISCOVERY_RETRY + 1, ); @@ -1140,7 +1182,7 @@ mod tests { let now = Instant::now(); let subnet_query = SubnetQuery { - subnet_id: SubnetId::new(1), + subnet: Subnet::Attestation(SubnetId::new(1)), min_ttl: Some(now + Duration::from_secs(10)), retries: 0, }; @@ -1174,7 +1216,7 @@ mod tests { bitfield.set(id, true).unwrap(); } - builder.add_value(BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + builder.add_value(ATTESTATION_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); builder.build(&enr_key).unwrap() } @@ -1187,12 +1229,12 @@ mod tests { let query = GroupedQueryType::Subnet(vec![ SubnetQuery { - subnet_id: SubnetId::new(1), + subnet: Subnet::Attestation(SubnetId::new(1)), min_ttl: instant1, retries: 0, }, SubnetQuery { - subnet_id: SubnetId::new(2), + subnet: Subnet::Attestation(SubnetId::new(2)), min_ttl: instant2, retries: 0, }, diff --git a/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs b/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs index 0b761eeddee..e324532f7ba 100644 --- a/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs +++ b/beacon_node/eth2_libp2p/src/discovery/subnet_predicate.rs @@ -1,11 +1,12 @@ ///! The subnet predicate used for searching for a particular subnet. use super::*; +use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use slog::trace; use std::ops::Deref; /// Returns the predicate for a given subnet. pub fn subnet_predicate( - subnet_ids: Vec, + subnets: Vec, log: &slog::Logger, ) -> impl Fn(&Enr) -> bool + Send where @@ -14,39 +15,33 @@ where let log_clone = log.clone(); move |enr: &Enr| { - if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) { - let bitfield = match BitVector::::from_ssz_bytes( - bitfield_bytes, - ) { - Ok(v) => v, - Err(e) => { - warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e)); - return false; - } + let attestation_bitfield: EnrAttestationBitfield = + match enr.attestation_bitfield::() { + Ok(b) => b, + Err(_e) => return false, }; - let matches: Vec<&SubnetId> = subnet_ids - .iter() - .filter(|id| bitfield.get(**id.deref() as usize).unwrap_or(false)) - .collect(); + // Pre-fork/fork-boundary enrs may not contain a syncnets field. 
+ // Don't return early here + let sync_committee_bitfield: Result, _> = + enr.sync_committee_bitfield::(); - if matches.is_empty() { - trace!( - log_clone, - "Peer found but not on any of the desired subnets"; - "peer_id" => %enr.peer_id() - ); - return false; - } else { - trace!( - log_clone, - "Peer found on desired subnet(s)"; - "peer_id" => %enr.peer_id(), - "subnets" => ?matches.as_slice() - ); - return true; - } + let predicate = subnets.iter().any(|subnet| match subnet { + Subnet::Attestation(s) => attestation_bitfield + .get(*s.deref() as usize) + .unwrap_or(false), + Subnet::SyncCommittee(s) => sync_committee_bitfield + .as_ref() + .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), + }); + + if !predicate { + trace!( + log_clone, + "Peer found but not on any of the desired subnets"; + "peer_id" => %enr.peer_id() + ); } - false + predicate } } diff --git a/beacon_node/eth2_libp2p/src/lib.rs b/beacon_node/eth2_libp2p/src/lib.rs index 3a582ac711d..c04c6161693 100644 --- a/beacon_node/eth2_libp2p/src/lib.rs +++ b/beacon_node/eth2_libp2p/src/lib.rs @@ -60,7 +60,10 @@ impl<'de> Deserialize<'de> for PeerIdSerialized { } } -pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage, SubnetDiscovery}; +pub use crate::types::{ + error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, + SubnetDiscovery, +}; pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index 26234a93fde..34ba564d612 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -1,10 +1,12 @@ //! Implementation of Lighthouse's peer management system. pub use self::peerdb::*; +use crate::discovery::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::types::SyncState; use crate::{error, metrics, Gossipsub}; use crate::{NetworkConfig, NetworkGlobals, PeerId}; +use crate::{Subnet, SubnetDiscovery}; use discv5::Enr; use futures::prelude::*; use futures::Stream; @@ -19,7 +21,7 @@ use std::{ task::{Context, Poll}, time::{Duration, Instant}, }; -use types::{EthSpec, SubnetId}; +use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -34,7 +36,7 @@ pub use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerConnectionSta pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; use score::{PeerAction, ReportSource, ScoreState}; use std::cmp::Ordering; -use std::collections::HashMap; +use std::collections::{hash_map::Entry, HashMap}; use std::net::IpAddr; /// The time in seconds between re-status's peers. @@ -78,6 +80,11 @@ pub struct PeerManager { target_peers: usize, /// The maximum number of peers we allow (exceptions for subnet peers) max_peers: usize, + /// A collection of sync committee subnets that we need to stay subscribed to. + /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run + /// discovery queries for subnet peers if we disconnect from existing sync + /// committee subnet peers. + sync_committee_subnets: HashMap, /// The heartbeat interval to perform routine maintenance. heartbeat: tokio::time::Interval, /// Keeps track of whether the discovery service is enabled or not. 
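Note on the predicate change above: an ENR always carries an attestation bitfield, but the syncnets entry only exists once a node runs Altair, so the sync committee bitfield is fetched as a `Result` and its absence is treated as "not on any sync committee subnet" rather than a reason to reject the peer. The following minimal sketch illustrates just that matching rule; `Subnet` here is a simplified stand-in for the crate's enum over `SubnetId`/`SyncSubnetId`, plain bool slices stand in for the SSZ bitvectors, and `matches_any` is an illustrative helper, not a crate API:

    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    enum Subnet {
        Attestation(u64),
        SyncCommittee(u64),
    }

    /// True if the peer advertises membership of any wanted subnet.
    /// `syncnets` is `None` for pre-Altair ENRs; that must not be an error,
    /// it simply means the peer matches no sync committee subnet.
    fn matches_any(wanted: &[Subnet], attnets: &[bool], syncnets: Option<&[bool]>) -> bool {
        wanted.iter().any(|subnet| match subnet {
            Subnet::Attestation(id) => attnets.get(*id as usize).copied().unwrap_or(false),
            Subnet::SyncCommittee(id) => syncnets
                .map_or(false, |bits| bits.get(*id as usize).copied().unwrap_or(false)),
        })
    }

    fn main() {
        let attnets = [false, true];
        // A pre-Altair peer (no syncnets field) still matches attestation subnets...
        assert!(matches_any(&[Subnet::Attestation(1)], &attnets, None));
        // ...but never matches a sync committee subnet.
        assert!(!matches_any(&[Subnet::SyncCommittee(0)], &attnets, None));
    }

Keeping the sync committee arm a soft miss rather than an early return is what lets fork-boundary peers still be discovered for attestation subnets.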
@@ -108,6 +115,8 @@ pub enum PeerManagerEvent { UnBanned(PeerId, Vec), /// Request the behaviour to discover more peers. DiscoverPeers, + /// Request the behaviour to discover peers on subnets. + DiscoverSubnetPeers(Vec), } impl PeerManager { @@ -127,6 +136,7 @@ impl PeerManager { outbound_ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL_OUTBOUND)), status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)), target_peers: config.target_peers, + sync_committee_subnets: Default::default(), max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize, heartbeat, discovery_enabled: !config.disable_discovery, @@ -264,16 +274,16 @@ impl PeerManager { } /// Adds a gossipsub subscription to a peer in the peerdb. - pub fn add_subscription(&self, peer_id: &PeerId, subnet_id: SubnetId) { + pub fn add_subscription(&self, peer_id: &PeerId, subnet: Subnet) { if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) { - info.subnets.insert(subnet_id); + info.subnets.insert(subnet); } } /// Removes a gossipsub subscription to a peer in the peerdb. - pub fn remove_subscription(&self, peer_id: &PeerId, subnet_id: SubnetId) { + pub fn remove_subscription(&self, peer_id: &PeerId, subnet: Subnet) { if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) { - info.subnets.remove(&subnet_id); + info.subnets.remove(&subnet); } } @@ -284,6 +294,21 @@ impl PeerManager { } } + /// Insert the sync subnet into list of long lived sync committee subnets that we need to + /// maintain adequate number of peers for. + pub fn add_sync_subnet(&mut self, subnet_id: SyncSubnetId, min_ttl: Instant) { + match self.sync_committee_subnets.entry(subnet_id) { + Entry::Vacant(_) => { + self.sync_committee_subnets.insert(subnet_id, min_ttl); + } + Entry::Occupied(old) => { + if *old.get() < min_ttl { + self.sync_committee_subnets.insert(subnet_id, min_ttl); + } + } + } + } + /* Notifications from the Swarm */ // A peer is being dialed. @@ -599,9 +624,9 @@ impl PeerManager { // if the sequence number is unknown send an update the meta data of the peer. if let Some(meta_data) = &peer_info.meta_data { - if meta_data.seq_number < seq { + if *meta_data.seq_number() < seq { debug!(self.log, "Requesting new metadata from peer"; - "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq); + "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "ping_seq_no" => seq); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { @@ -623,9 +648,9 @@ impl PeerManager { // if the sequence number is unknown send update the meta data of the peer. 
if let Some(meta_data) = &peer_info.meta_data { - if meta_data.seq_number < seq { + if *meta_data.seq_number() < seq { debug!(self.log, "Requesting new metadata from peer"; - "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq); + "peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number(), "pong_seq_no" => seq); self.events.push(PeerManagerEvent::MetaData(*peer_id)); } } else { @@ -643,19 +668,19 @@ impl PeerManager { pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data { - if known_meta_data.seq_number < meta_data.seq_number { + if *known_meta_data.seq_number() < *meta_data.seq_number() { debug!(self.log, "Updating peer's metadata"; - "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); + "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number()); } else { debug!(self.log, "Received old metadata"; - "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); + "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number(), "new_seq_no" => meta_data.seq_number()); // Updating metadata even in this case to prevent storing - // incorrect `metadata.attnets` for a peer + // incorrect `attnets/syncnets` for a peer } } else { // we have no meta-data for this peer, update debug!(self.log, "Obtained peer's metadata"; - "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number); + "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number()); } peer_info.meta_data = Some(meta_data); } else { @@ -965,6 +990,46 @@ impl PeerManager { Ok(()) } + /// Run discovery query for additional sync committee peers if we fall below `TARGET_PEERS`. + fn maintain_sync_committee_peers(&mut self) { + // Remove expired entries + self.sync_committee_subnets + .retain(|_, v| *v > Instant::now()); + + let subnets_to_discover: Vec = self + .sync_committee_subnets + .iter() + .filter_map(|(k, v)| { + if self + .network_globals + .peers + .read() + .good_peers_on_subnet(Subnet::SyncCommittee(*k)) + .count() + < TARGET_SUBNET_PEERS + { + Some(SubnetDiscovery { + subnet: Subnet::SyncCommittee(*k), + min_ttl: Some(*v), + }) + } else { + None + } + }) + .collect(); + + // request the subnet query from discovery + if !subnets_to_discover.is_empty() { + debug!( + self.log, + "Making subnet queries for maintaining sync committee peers"; + "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::>() + ); + self.events + .push(PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover)); + } + } + /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -989,6 +1054,9 @@ impl PeerManager { // Updates peer's scores. self.update_peer_scores(); + // Maintain minimum count for sync committee peers. 
+ self.maintain_sync_committee_peers(); + // Keep a list of peers we are disconnecting let mut disconnecting_peers = Vec::new(); @@ -1115,7 +1183,7 @@ mod tests { use super::*; use crate::discovery::enr::build_enr; use crate::discovery::enr_ext::CombinedKeyExt; - use crate::rpc::methods::MetaData; + use crate::rpc::methods::{MetaData, MetaDataV2}; use crate::Enr; use discv5::enr::CombinedKey; use slog::{o, Drain}; @@ -1156,10 +1224,11 @@ mod tests { enr, 9000, 9000, - MetaData { + MetaData::V2(MetaDataV2 { seq_number: 0, attnets: Default::default(), - }, + syncnets: Default::default(), + }), vec![], &log, ); diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index c9eeae94726..717782901de 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -1,8 +1,8 @@ use super::client::Client; use super::score::{PeerAction, Score, ScoreState}; use super::PeerSyncStatus; -use crate::rpc::MetaData; use crate::Multiaddr; +use crate::{rpc::MetaData, types::Subnet}; use discv5::Enr; use serde::{ ser::{SerializeStruct, Serializer}, @@ -12,7 +12,7 @@ use std::collections::HashSet; use std::net::{IpAddr, SocketAddr}; use std::time::Instant; use strum::AsRefStr; -use types::{EthSpec, SubnetId}; +use types::EthSpec; use PeerConnectionStatus::*; /// Information about a given connected peer. @@ -40,7 +40,7 @@ pub struct PeerInfo { /// connection. pub meta_data: Option>, /// Subnets the peer is connected to. - pub subnets: HashSet, + pub subnets: HashSet, /// The time we would like to retain this peer. After this time, the peer is no longer /// necessary. #[serde(skip)] @@ -84,17 +84,26 @@ impl PeerInfo { } } - /// Returns if the peer is subscribed to a given `SubnetId` from the metadata attnets field. - pub fn on_subnet_metadata(&self, subnet_id: SubnetId) -> bool { + /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. + pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { if let Some(meta_data) = &self.meta_data { - return meta_data.attnets.get(*subnet_id as usize).unwrap_or(false); + match subnet { + Subnet::Attestation(id) => { + return meta_data.attnets().get(**id as usize).unwrap_or(false) + } + Subnet::SyncCommittee(id) => { + return meta_data + .syncnets() + .map_or(false, |s| s.get(**id as usize).unwrap_or(false)) + } + } } false } - /// Returns if the peer is subscribed to a given `SubnetId` from the gossipsub subscriptions. - pub fn on_subnet_gossipsub(&self, subnet_id: SubnetId) -> bool { - self.subnets.contains(&subnet_id) + /// Returns if the peer is subscribed to a given `Subnet` from the gossipsub subscriptions. + pub fn on_subnet_gossipsub(&self, subnet: &Subnet) -> bool { + self.subnets.contains(subnet) } /// Returns the seen IP addresses of the peer. 
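The `on_subnet_metadata` change above relies on the accessors that the `superstruct` macro derives for the versioned `MetaData`: fields shared by all variants get infallible getters (`seq_number()`, `attnets()`), while the V2-only `syncnets` gets a partial getter returning a `Result`, so a V1 peer simply reads as "not on" any sync subnet. A hand-rolled sketch of that pattern, with `Vec<bool>` standing in for the SSZ bitvectors and `on_sync_subnet` as an illustrative helper (not a crate API):

    enum MetaData {
        V1 { seq_number: u64, attnets: Vec<bool> },
        V2 { seq_number: u64, attnets: Vec<bool>, syncnets: Vec<bool> },
    }

    impl MetaData {
        // Shared field: present in every variant, so the getter is infallible.
        fn seq_number(&self) -> u64 {
            match self {
                MetaData::V1 { seq_number, .. } | MetaData::V2 { seq_number, .. } => *seq_number,
            }
        }
        // V2-only field: the getter is partial, returning Err for V1 metadata.
        fn syncnets(&self) -> Result<&[bool], ()> {
            match self {
                MetaData::V1 { .. } => Err(()),
                MetaData::V2 { syncnets, .. } => Ok(syncnets),
            }
        }
    }

    // Mirrors the peer-info check: a V1 peer is never "on" a sync subnet.
    fn on_sync_subnet(md: &MetaData, id: usize) -> bool {
        md.syncnets()
            .map_or(false, |bits| bits.get(id).copied().unwrap_or(false))
    }

    fn main() {
        let v1 = MetaData::V1 { seq_number: 3, attnets: vec![true] };
        let v2 = MetaData::V2 { seq_number: 4, attnets: vec![true], syncnets: vec![false, true] };
        assert_eq!(v1.seq_number(), 3);
        assert!(!on_sync_subnet(&v1, 1));
        assert!(on_sync_subnet(&v2, 1));
    }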
diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs index 438980b9ee0..691600dd447 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs @@ -1,16 +1,19 @@ use super::peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use super::peer_sync_status::PeerSyncStatus; use super::score::{Score, ScoreState}; -use crate::multiaddr::{Multiaddr, Protocol}; use crate::rpc::methods::MetaData; use crate::Enr; use crate::PeerId; +use crate::{ + multiaddr::{Multiaddr, Protocol}, + types::Subnet, +}; use rand::seq::SliceRandom; use slog::{crit, debug, error, trace, warn}; use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; use std::time::Instant; -use types::{EthSpec, SubnetId}; +use types::EthSpec; /// Max number of disconnected nodes to remember. const MAX_DC_PEERS: usize = 500; @@ -267,14 +270,14 @@ impl PeerDB { } /// Gives an iterator of all peers on a given subnet. - pub fn good_peers_on_subnet(&self, subnet_id: SubnetId) -> impl Iterator { + pub fn good_peers_on_subnet(&self, subnet: Subnet) -> impl Iterator { self.peers .iter() .filter(move |(_, info)| { // We check both the metadata and gossipsub data as we only want to count long-lived subscribed peers info.is_connected() - && info.on_subnet_metadata(subnet_id) - && info.on_subnet_gossipsub(subnet_id) + && info.on_subnet_metadata(&subnet) + && info.on_subnet_gossipsub(&subnet) && info.is_good_gossipsub_peer() }) .map(|(peer_id, _)| peer_id) @@ -382,11 +385,11 @@ impl PeerDB { /// Extends the ttl of all peers on the given subnet that have a shorter /// min_ttl than what's given. - pub fn extend_peers_on_subnet(&mut self, subnet_id: SubnetId, min_ttl: Instant) { + pub fn extend_peers_on_subnet(&mut self, subnet: &Subnet, min_ttl: Instant) { let log = &self.log; self.peers.iter_mut() .filter(move |(_, info)| { - info.is_connected() && info.on_subnet_metadata(subnet_id) && info.on_subnet_gossipsub(subnet_id) + info.is_connected() && info.on_subnet_metadata(subnet) && info.on_subnet_gossipsub(subnet) }) .for_each(|(peer_id,info)| { if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl { diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/base.rs b/beacon_node/eth2_libp2p/src/rpc/codec/base.rs index ff158067aab..8b2df43ef92 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/base.rs @@ -181,16 +181,18 @@ where mod tests { use super::super::ssz_snappy::*; use super::*; - use crate::rpc::methods::StatusMessage; use crate::rpc::protocol::*; - use snap::write::FrameEncoder; - use ssz::Encode; - use std::io::Write; - use types::{Epoch, Hash256, Slot}; + + use std::sync::Arc; + use types::{ForkContext, Hash256}; use unsigned_varint::codec::Uvi; type Spec = types::MainnetEthSpec; + fn fork_context() -> ForkContext { + ForkContext::new::(types::Slot::new(0), Hash256::zero(), &Spec::default_spec()) + } + #[test] fn test_decode_status_message() { let message = hex::decode("0054ff060000734e615070590032000006e71e7b54989925efd6c9cbcb8ceb9b5f71216f5137282bf6a1e3b50f64e42d6c7fb347abe07eb0db8200000005029e2800").unwrap(); @@ -200,8 +202,9 @@ mod tests { let snappy_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + let fork_context = Arc::new(fork_context()); let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); + SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576, 
fork_context); // remove response code let mut snappy_buf = buf.clone(); @@ -233,8 +236,10 @@ mod tests { let snappy_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); + + let fork_context = Arc::new(fork_context()); let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); + SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576, fork_context); let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err(); @@ -260,80 +265,34 @@ mod tests { // Response limits let limit = protocol_id.rpc_response_limits::(); let mut max = encode_len(limit.max + 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id.clone(), 1_048_576); + let fork_context = Arc::new(fork_context()); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + 1_048_576, + fork_context.clone(), + ); assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData); let mut min = encode_len(limit.min - 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id.clone(), 1_048_576); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + 1_048_576, + fork_context.clone(), + ); assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData); // Request limits let limit = protocol_id.rpc_request_limits(); let mut max = encode_len(limit.max + 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id.clone(), 1_048_576); + let mut codec = SSZSnappyOutboundCodec::::new( + protocol_id.clone(), + 1_048_576, + fork_context.clone(), + ); assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData); let mut min = encode_len(limit.min - 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id, 1_048_576); + let mut codec = SSZSnappyOutboundCodec::::new(protocol_id, 1_048_576, fork_context); assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData); } - - #[test] - fn test_decode_malicious_status_message() { - // 10 byte snappy stream identifier - let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; - - assert_eq!(stream_identifier.len(), 10); - - // byte 0(0xFE) is padding chunk type identifier for snappy messages - // byte 1,2,3 are chunk length (little endian) - let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; - - // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. - let status_message_bytes = StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::from_low_u64_be(0), - finalized_epoch: Epoch::new(1), - head_root: Hash256::from_low_u64_be(0), - head_slot: Slot::new(1), - } - .as_ssz_bytes(); - - assert_eq!(status_message_bytes.len(), 84); - assert_eq!(snap::raw::max_compress_len(status_message_bytes.len()), 130); - - let mut uvi_codec: Uvi = Uvi::default(); - let mut dst = BytesMut::with_capacity(1024); - - // Insert length-prefix - uvi_codec - .encode(status_message_bytes.len(), &mut dst) - .unwrap(); - - // Insert snappy stream identifier - dst.extend_from_slice(stream_identifier); - - // Insert malicious padding of 80 bytes. - for _ in 0..20 { - dst.extend_from_slice(malicious_padding); - } - - // Insert payload (42 bytes compressed) - let mut writer = FrameEncoder::new(Vec::new()); - writer.write_all(&status_message_bytes).unwrap(); - writer.flush().unwrap(); - assert_eq!(writer.get_ref().len(), 42); - dst.extend_from_slice(writer.get_ref()); - - // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
- - let snappy_protocol_id = - ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); - - let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576); - - let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err(); - assert_eq!(snappy_decoded_message, RPCError::InvalidData); - } } diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs index b7deb959d47..915572fd12a 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs @@ -2,7 +2,8 @@ use crate::rpc::{ codec::base::OutboundCodec, protocol::{Encoding, Protocol, ProtocolId, RPCError, Version, ERROR_TYPE_MAX, ERROR_TYPE_MIN}, }; -use crate::rpc::{methods::*, InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse}; +use crate::rpc::{InboundRequest, OutboundRequest, RPCCodedResponse, RPCResponse}; +use crate::{rpc::methods::*, EnrSyncCommitteeBitfield}; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; @@ -12,10 +13,16 @@ use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; use std::marker::PhantomData; +use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; -use types::{EthSpec, SignedBeaconBlock, SignedBeaconBlockBase}; +use types::{ + EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, +}; use unsigned_varint::codec::Uvi; +const CONTEXT_BYTES_LEN: usize = 4; + /* Inbound Codec */ pub struct SSZSnappyInboundCodec { @@ -24,11 +31,16 @@ pub struct SSZSnappyInboundCodec { len: Option, /// Maximum bytes that can be sent in one req/resp chunked responses. max_packet_size: usize, + fork_context: Arc, phantom: PhantomData, } impl SSZSnappyInboundCodec { - pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { + pub fn new( + protocol: ProtocolId, + max_packet_size: usize, + fork_context: Arc, + ) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); @@ -38,6 +50,7 @@ impl SSZSnappyInboundCodec { protocol, len: None, phantom: PhantomData, + fork_context, max_packet_size, } } @@ -52,13 +65,38 @@ impl Encoder> for SSZSnappyInboundCodec< item: RPCCodedResponse, dst: &mut BytesMut, ) -> Result<(), Self::Error> { - let bytes = match item { - RPCCodedResponse::Success(resp) => match resp { + let bytes = match &item { + RPCCodedResponse::Success(resp) => match &resp { RPCResponse::Status(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::Pong(res) => res.data.as_ssz_bytes(), - RPCResponse::MetaData(res) => res.as_ssz_bytes(), + RPCResponse::MetaData(res) => + // Encode the correct version of the MetaData response based on the negotiated version. 
+ { + match self.protocol.version { + Version::V1 => MetaData::::V1(MetaDataV1 { + seq_number: *res.seq_number(), + attnets: res.attnets().clone(), + }) + .as_ssz_bytes(), + Version::V2 => { + // `res` is of type MetaDataV2, return the ssz bytes + if res.syncnets().is_ok() { + res.as_ssz_bytes() + } else { + // `res` is of type MetaDataV1, create a MetaDataV2 by adding a default syncnets field + // Note: This code path is redundant as `res` would be always of type MetaDataV2 + MetaData::::V2(MetaDataV2 { + seq_number: *res.seq_number(), + attnets: res.attnets().clone(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }) + .as_ssz_bytes() + } + } + } + } }, RPCCodedResponse::Error(_, err) => err.as_ssz_bytes(), RPCCodedResponse::StreamTermination(_) => { @@ -71,6 +109,12 @@ impl Encoder> for SSZSnappyInboundCodec< "attempting to encode data > max_packet_size", )); } + + // Add context bytes if required + if let Some(ref context_bytes) = context_bytes(&self.protocol, &self.fork_context, &item) { + dst.extend_from_slice(context_bytes); + } + // Inserts the length prefix of the uncompressed bytes into dst // encoded as a unsigned varint self.inner @@ -93,18 +137,9 @@ impl Decoder for SSZSnappyInboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let length = if let Some(length) = self.len { - length - } else { - // Decode the length of the uncompressed bytes from an unsigned varint - // Note: length-prefix of > 10 bytes(uint64) would be a decoding error - match self.inner.decode(src).map_err(RPCError::from)? { - Some(length) => { - self.len = Some(length); - length - } - None => return Ok(None), // need more bytes to decode length - } + let length = match handle_length(&mut self.inner, &mut self.len, src)? { + Some(len) => len, + None => return Ok(None), }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -128,46 +163,9 @@ impl Decoder for SSZSnappyInboundCodec { self.len = None; let _read_bytes = src.split_to(n as usize); - // We need not check that decoded_buffer.len() is within bounds here - // since we have already checked `length` above. 
- match self.protocol.message_name { - Protocol::Status => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::Status( - StatusMessage::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - Protocol::Goodbye => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::Goodbye( - GoodbyeReason::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - Protocol::BlocksByRange => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::BlocksByRange( - BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - Protocol::BlocksByRoot => match self.protocol.version { - Version::V1 => { - Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { - block_roots: VariableList::from_ssz_bytes(&decoded_buffer)?, - }))) - } - }, - Protocol::Ping => match self.protocol.version { - Version::V1 => Ok(Some(InboundRequest::Ping(Ping { - data: u64::from_ssz_bytes(&decoded_buffer)?, - }))), - }, - // This case should be unreachable as `MetaData` requests are handled separately in the `InboundUpgrade` - Protocol::MetaData => match self.protocol.version { - Version::V1 => { - if !decoded_buffer.is_empty() { - Err(RPCError::InvalidData) - } else { - Ok(Some(InboundRequest::MetaData(PhantomData))) - } - } - }, + match self.protocol.version { + Version::V1 => handle_v1_request(self.protocol.message_name, &decoded_buffer), + Version::V2 => handle_v2_request(self.protocol.message_name, &decoded_buffer), } } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), @@ -182,11 +180,18 @@ pub struct SSZSnappyOutboundCodec { protocol: ProtocolId, /// Maximum bytes that can be sent in one req/resp chunked responses. max_packet_size: usize, + /// The fork name corresponding to the received context bytes. + fork_name: Option, + fork_context: Arc, phantom: PhantomData, } impl SSZSnappyOutboundCodec { - pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { + pub fn new( + protocol: ProtocolId, + max_packet_size: usize, + fork_context: Arc, + ) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); @@ -196,6 +201,8 @@ impl SSZSnappyOutboundCodec { protocol, max_packet_size, len: None, + fork_name: None, + fork_context, phantom: PhantomData, } } @@ -251,18 +258,23 @@ impl Decoder for SSZSnappyOutboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let length = if let Some(length) = self.len { - length - } else { - // Decode the length of the uncompressed bytes from an unsigned varint - // Note: length-prefix of > 10 bytes(uint64) would be a decoding error - match self.inner.decode(src).map_err(RPCError::from)? { - Some(length) => { - self.len = Some(length as usize); - length - } - None => return Ok(None), // need more bytes to decode length + // Read the context bytes if required + if self.protocol.has_context_bytes() && self.fork_name.is_none() { + if src.len() >= CONTEXT_BYTES_LEN { + let context_bytes = src.split_to(CONTEXT_BYTES_LEN); + let mut result = [0; CONTEXT_BYTES_LEN]; + result.copy_from_slice(context_bytes.as_ref()); + self.fork_name = Some(context_bytes_to_fork_name( + result, + self.fork_context.clone(), + )?); + } else { + return Ok(None); } + } + let length = match handle_length(&mut self.inner, &mut self.len, src)? 
{ + Some(len) => len, + None => return Ok(None), }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -286,42 +298,13 @@ impl Decoder for SSZSnappyOutboundCodec { self.len = None; let _read_bytes = src.split_to(n as usize); - // We need not check that decoded_buffer.len() is within bounds here - // since we have already checked `length` above. - match self.protocol.message_name { - Protocol::Status => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::Status( - StatusMessage::from_ssz_bytes(&decoded_buffer)?, - ))), - }, - // This case should be unreachable as `Goodbye` has no response. - Protocol::Goodbye => Err(RPCError::InvalidData), - Protocol::BlocksByRange => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( - // FIXME(altair): support Altair blocks - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes( - &decoded_buffer, - )?), - )))), - }, - Protocol::BlocksByRoot => match self.protocol.version { - // FIXME(altair): support Altair blocks - Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new( - SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes( - &decoded_buffer, - )?), - )))), - }, - Protocol::Ping => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::Pong(Ping { - data: u64::from_ssz_bytes(&decoded_buffer)?, - }))), - }, - Protocol::MetaData => match self.protocol.version { - Version::V1 => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes( - &decoded_buffer, - )?))), - }, + match self.protocol.version { + Version::V1 => handle_v1_response(self.protocol.message_name, &decoded_buffer), + Version::V2 => handle_v2_response( + self.protocol.message_name, + &decoded_buffer, + &mut self.fork_name, + ), } } Err(e) => handle_error(e, reader.get_ref().get_ref().position(), max_compressed_len), @@ -336,17 +319,9 @@ impl OutboundCodec> for SSZSnappyOutbound &mut self, src: &mut BytesMut, ) -> Result, RPCError> { - let length = if let Some(length) = self.len { - length - } else { - // Decode the length of the uncompressed bytes from an unsigned varint - match self.inner.decode(src).map_err(RPCError::from)? { - Some(length) => { - self.len = Some(length as usize); - length - } - None => return Ok(None), // need more bytes to decode length - } + let length = match handle_length(&mut self.inner, &mut self.len, src)? { + Some(len) => len, + None => return Ok(None), }; // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of @@ -398,3 +373,739 @@ fn handle_error( _ => Err(err).map_err(RPCError::from), } } + +/// Returns `Some(context_bytes)` for encoding RPC responses that require context bytes. +/// Returns `None` when context bytes are not required. +fn context_bytes( + protocol: &ProtocolId, + fork_context: &ForkContext, + resp: &RPCCodedResponse, +) -> Option<[u8; CONTEXT_BYTES_LEN]> { + // Add the context bytes if required + if protocol.has_context_bytes() { + if let RPCCodedResponse::Success(RPCResponse::BlocksByRange(res)) = resp { + if let SignedBeaconBlock::Altair { .. } = **res { + // Altair context being `None` implies that "altair never happened". + // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. + return fork_context.to_context_bytes(ForkName::Altair); + } else if let SignedBeaconBlock::Base { .. 
} = **res { + return Some(fork_context.genesis_context_bytes()); + } + } + + if let RPCCodedResponse::Success(RPCResponse::BlocksByRoot(res)) = resp { + if let SignedBeaconBlock::Altair { .. } = **res { + // Altair context being `None` implies that "altair never happened". + // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. + return fork_context.to_context_bytes(ForkName::Altair); + } else if let SignedBeaconBlock::Base { .. } = **res { + return Some(fork_context.genesis_context_bytes()); + } + } + } + None +} + +/// Decodes the length-prefix from the bytes as an unsigned protobuf varint. +/// +/// Returns `Ok(Some(length))` by decoding the bytes if required. +/// Returns `Ok(None)` if more bytes are needed to decode the length-prefix. +/// Returns an `RPCError` for a decoding error. +fn handle_length( + uvi_codec: &mut Uvi, + len: &mut Option, + bytes: &mut BytesMut, +) -> Result, RPCError> { + if let Some(length) = len { + Ok(Some(*length)) + } else { + // Decode the length of the uncompressed bytes from an unsigned varint + // Note: length-prefix of > 10 bytes(uint64) would be a decoding error + match uvi_codec.decode(bytes).map_err(RPCError::from)? { + Some(length) => { + *len = Some(length as usize); + Ok(Some(length)) + } + None => Ok(None), // need more bytes to decode length + } + } +} + +/// Decodes a `Version::V1` `InboundRequest` from the byte stream. +/// `decoded_buffer` should be an ssz-encoded bytestream with +// length = length-prefix received in the beginning of the stream. +fn handle_v1_request( + protocol: Protocol, + decoded_buffer: &[u8], +) -> Result>, RPCError> { + match protocol { + Protocol::Status => Ok(Some(InboundRequest::Status(StatusMessage::from_ssz_bytes( + decoded_buffer, + )?))), + Protocol::Goodbye => Ok(Some(InboundRequest::Goodbye( + GoodbyeReason::from_ssz_bytes(decoded_buffer)?, + ))), + Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( + BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), + Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { + block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + }))), + Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping { + data: u64::from_ssz_bytes(decoded_buffer)?, + }))), + + // MetaData requests return early from InboundUpgrade and do not reach the decoder. + // Handle this case just for completeness. + Protocol::MetaData => { + if !decoded_buffer.is_empty() { + Err(RPCError::InvalidData) + } else { + Ok(Some(InboundRequest::MetaData(PhantomData))) + } + } + } +} + +/// Decodes a `Version::V2` `InboundRequest` from the byte stream. +/// `decoded_buffer` should be an ssz-encoded bytestream with +// length = length-prefix received in the beginning of the stream. +fn handle_v2_request( + protocol: Protocol, + decoded_buffer: &[u8], +) -> Result>, RPCError> { + match protocol { + Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( + BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), + Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { + block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, + }))), + // MetaData requests return early from InboundUpgrade and do not reach the decoder. + // Handle this case just for completeness. 
+        Protocol::MetaData => {
+            if !decoded_buffer.is_empty() {
+                Err(RPCError::InvalidData)
+            } else {
+                Ok(Some(InboundRequest::MetaData(PhantomData)))
+            }
+        }
+        _ => Err(RPCError::ErrorResponse(
+            RPCResponseErrorCode::InvalidRequest,
+            format!("{} does not support version 2", protocol),
+        )),
+    }
+}
+
+/// Decodes a `Version::V1` `RPCResponse` from the byte stream.
+/// `decoded_buffer` should be an ssz-encoded bytestream with
+/// length = length-prefix received in the beginning of the stream.
+fn handle_v1_response<T: EthSpec>(
+    protocol: Protocol,
+    decoded_buffer: &[u8],
+) -> Result<Option<RPCResponse<T>>, RPCError> {
+    match protocol {
+        Protocol::Status => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
+            decoded_buffer,
+        )?))),
+        // This case should be unreachable as `Goodbye` has no response.
+        Protocol::Goodbye => Err(RPCError::InvalidData),
+        Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new(
+            SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+        )))),
+        Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
+            SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+        )))),
+        Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping {
+            data: u64::from_ssz_bytes(decoded_buffer)?,
+        }))),
+        Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1(
+            MetaDataV1::from_ssz_bytes(decoded_buffer)?,
+        )))),
+    }
+}
+
+/// Decodes a `Version::V2` `RPCResponse` from the byte stream.
+/// `decoded_buffer` should be an ssz-encoded bytestream with
+/// length = length-prefix received in the beginning of the stream.
+///
+/// For BlocksByRange/BlocksByRoot responses, decodes the appropriate response
+/// according to the received `ForkName`.
+fn handle_v2_response<T: EthSpec>(
+    protocol: Protocol,
+    decoded_buffer: &[u8],
+    fork_name: &mut Option<ForkName>,
+) -> Result<Option<RPCResponse<T>>, RPCError> {
+    // MetaData does not contain context_bytes
+    if let Protocol::MetaData = protocol {
+        Ok(Some(RPCResponse::MetaData(MetaData::V2(
+            MetaDataV2::from_ssz_bytes(decoded_buffer)?,
+        ))))
+    } else {
+        let fork_name = fork_name.take().ok_or_else(|| {
+            RPCError::ErrorResponse(
+                RPCResponseErrorCode::InvalidRequest,
+                format!("No context bytes provided for {} response", protocol),
+            )
+        })?;
+        match protocol {
+            Protocol::BlocksByRange => match fork_name {
+                ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Box::new(
+                    SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(
+                        decoded_buffer,
+                    )?),
+                )))),
+
+                ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new(
+                    SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+                )))),
+            },
+            Protocol::BlocksByRoot => match fork_name {
+                ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
+                    SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes(
+                        decoded_buffer,
+                    )?),
+                )))),
+                ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new(
+                    SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
+                )))),
+            },
+            _ => Err(RPCError::ErrorResponse(
+                RPCResponseErrorCode::InvalidRequest,
+                "Invalid v2 request".to_string(),
+            )),
+        }
+    }
+}
+
+/// Takes the context bytes and a fork_context and returns the corresponding fork_name.
+fn context_bytes_to_fork_name( + context_bytes: [u8; CONTEXT_BYTES_LEN], + fork_context: Arc, +) -> Result { + fork_context + .from_context_bytes(context_bytes) + .cloned() + .ok_or_else(|| { + RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Context bytes does not correspond to a valid fork".to_string(), + ) + }) +} +#[cfg(test)] +mod tests { + + use super::*; + use crate::rpc::{protocol::*, MetaData}; + use crate::{ + rpc::{methods::StatusMessage, Ping, RPCResponseErrorCode}, + types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, + }; + use std::sync::Arc; + use types::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, Epoch, ForkContext, Hash256, Signature, + SignedBeaconBlock, Slot, + }; + + use snap::write::FrameEncoder; + use ssz::Encode; + use std::io::Write; + + type Spec = types::MainnetEthSpec; + + fn fork_context() -> ForkContext { + ForkContext::new::(types::Slot::new(0), Hash256::zero(), &Spec::default_spec()) + } + + fn base_block() -> SignedBeaconBlock { + let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&Spec::default_spec())); + SignedBeaconBlock::from_block(full_block, Signature::empty()) + } + + fn altair_block() -> SignedBeaconBlock { + let full_block = + BeaconBlock::Altair(BeaconBlockAltair::::full(&Spec::default_spec())); + SignedBeaconBlock::from_block(full_block, Signature::empty()) + } + + fn status_message() -> StatusMessage { + StatusMessage { + fork_digest: [0; 4], + finalized_root: Hash256::from_low_u64_be(0), + finalized_epoch: Epoch::new(1), + head_root: Hash256::from_low_u64_be(0), + head_slot: Slot::new(1), + } + } + + fn ping_message() -> Ping { + Ping { data: 1 } + } + + fn metadata() -> MetaData { + MetaData::V1(MetaDataV1 { + seq_number: 1, + attnets: EnrAttestationBitfield::::default(), + }) + } + + fn metadata_v2() -> MetaData { + MetaData::V2(MetaDataV2 { + seq_number: 1, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }) + } + + /// Encodes the given protocol response as bytes. + fn encode( + protocol: Protocol, + version: Version, + message: RPCCodedResponse, + ) -> Result { + let max_packet_size = 1_048_576; + let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); + let fork_context = Arc::new(fork_context()); + + let mut buf = BytesMut::new(); + let mut snappy_inbound_codec = + SSZSnappyInboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); + + snappy_inbound_codec.encode(message, &mut buf)?; + Ok(buf) + } + + /// Attempts to decode the given protocol bytes as an rpc response + fn decode( + protocol: Protocol, + version: Version, + message: &mut BytesMut, + ) -> Result>, RPCError> { + let max_packet_size = 1_048_576; + let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); + let fork_context = Arc::new(fork_context()); + let mut snappy_outbound_codec = + SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); + // decode message just as snappy message + snappy_outbound_codec.decode(message) + } + + /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. 
+    fn encode_then_decode(
+        protocol: Protocol,
+        version: Version,
+        message: RPCCodedResponse<Spec>,
+    ) -> Result<Option<RPCResponse<Spec>>, RPCError> {
+        let mut encoded = encode(protocol, version.clone(), message)?;
+        decode(protocol, version, &mut encoded)
+    }
+
+    // Test RPCResponse encoding/decoding for V1 messages
+    #[test]
+    fn test_encode_then_decode_v1() {
+        assert_eq!(
+            encode_then_decode(
+                Protocol::Status,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::Status(status_message()))
+            ),
+            Ok(Some(RPCResponse::Status(status_message())))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::Ping,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::Pong(ping_message()))
+            ),
+            Ok(Some(RPCResponse::Pong(ping_message())))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRange,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRange(Box::new(base_block()))))
+        );
+
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::BlocksByRange,
+                    Version::V1,
+                    RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))),
+                )
+                .unwrap_err(),
+                RPCError::SSZDecodeError(_)
+            ),
+            "altair block cannot be decoded with blocks by range V1 version"
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRoot,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRoot(Box::new(base_block()))))
+        );
+
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::BlocksByRoot,
+                    Version::V1,
+                    RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))),
+                )
+                .unwrap_err(),
+                RPCError::SSZDecodeError(_)
+            ),
+            "altair block cannot be decoded with blocks by root V1 version"
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::MetaData,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
+            ),
+            Ok(Some(RPCResponse::MetaData(metadata()))),
+        );
+
+        // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1
+        assert_eq!(
+            encode_then_decode(
+                Protocol::MetaData,
+                Version::V1,
+                RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())),
+            ),
+            Ok(Some(RPCResponse::MetaData(metadata()))),
+        );
+    }
+
+    // Test RPCResponse encoding/decoding for V2 messages
+    #[test]
+    fn test_encode_then_decode_v2() {
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::Status,
+                    Version::V2,
+                    RPCCodedResponse::Success(RPCResponse::Status(status_message())),
+                )
+                .unwrap_err(),
+                RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
+            ),
+            "status does not have V2 message"
+        );
+
+        assert!(
+            matches!(
+                encode_then_decode(
+                    Protocol::Ping,
+                    Version::V2,
+                    RPCCodedResponse::Success(RPCResponse::Pong(ping_message())),
+                )
+                .unwrap_err(),
+                RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
+            ),
+            "ping does not have V2 message"
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRange,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block())))
+            ),
+            Ok(Some(RPCResponse::BlocksByRange(Box::new(base_block()))))
+        );
+
+        assert_eq!(
+            encode_then_decode(
+                Protocol::BlocksByRange,
+                Version::V2,
+                RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block())))
+            ),
+
Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block())))) + ); + + assert_eq!( + encode_then_decode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))) + ), + Ok(Some(RPCResponse::BlocksByRoot(Box::new(base_block())))) + ); + + assert_eq!( + encode_then_decode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))) + ), + Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block())))) + ); + + // A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2 + assert_eq!( + encode_then_decode( + Protocol::MetaData, + Version::V2, + RPCCodedResponse::Success(RPCResponse::MetaData(metadata())) + ), + Ok(Some(RPCResponse::MetaData(metadata_v2()))) + ); + + assert_eq!( + encode_then_decode( + Protocol::MetaData, + Version::V2, + RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())) + ), + Ok(Some(RPCResponse::MetaData(metadata_v2()))) + ); + } + + // Test RPCResponse encoding/decoding for V2 messages + #[test] + fn test_context_bytes_v2() { + let fork_context = fork_context(); + + // Removing context bytes for v2 messages should error + let mut encoded_bytes = encode( + Protocol::BlocksByRange, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))), + ) + .unwrap(); + + let _ = encoded_bytes.split_to(4); + + assert!(matches!( + decode(Protocol::BlocksByRange, Version::V2, &mut encoded_bytes).unwrap_err(), + RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + )); + + let mut encoded_bytes = encode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + ) + .unwrap(); + + let _ = encoded_bytes.split_to(4); + + assert!(matches!( + decode(Protocol::BlocksByRange, Version::V2, &mut encoded_bytes).unwrap_err(), + RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + )); + + // Trying to decode a base block with altair context bytes should give ssz decoding error + let mut encoded_bytes = encode( + Protocol::BlocksByRange, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))), + ) + .unwrap(); + + let mut wrong_fork_bytes = BytesMut::new(); + wrong_fork_bytes + .extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); + + assert!(matches!( + decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + RPCError::SSZDecodeError(_), + )); + + // Trying to decode an altair block with base context bytes should give ssz decoding error + let mut encoded_bytes = encode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + ) + .unwrap(); + + let mut wrong_fork_bytes = BytesMut::new(); + wrong_fork_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Base).unwrap()); + wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); + + assert!(matches!( + decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + RPCError::SSZDecodeError(_), + )); + + // Adding context bytes to Protocols that don't require it should return an error + let mut encoded_bytes = BytesMut::new(); + encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + encoded_bytes.extend_from_slice( + &encode( + Protocol::MetaData, + Version::V2, + 
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + ) + .unwrap(), + ); + + assert!(decode(Protocol::MetaData, Version::V2, &mut encoded_bytes).is_err()); + + // Sending context bytes which do not correspond to any fork should return an error + let mut encoded_bytes = encode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + ) + .unwrap(); + + let mut wrong_fork_bytes = BytesMut::new(); + wrong_fork_bytes.extend_from_slice(&[42, 42, 42, 42]); + wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); + + assert!(matches!( + decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), + )); + + // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` + let mut encoded_bytes = encode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + ) + .unwrap(); + + let mut part = encoded_bytes.split_to(3); + + assert_eq!( + decode(Protocol::BlocksByRange, Version::V2, &mut part), + Ok(None) + ) + } + + /// Test a malicious snappy encoding for a V1 `Status` message where the attacker + /// sends a valid message filled with a stream of useless padding before the actual message. + #[test] + fn test_decode_malicious_v1_message() { + // 10 byte snappy stream identifier + let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; + + assert_eq!(stream_identifier.len(), 10); + + // byte 0(0xFE) is padding chunk type identifier for snappy messages + // byte 1,2,3 are chunk length (little endian) + let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; + + // Status message is 84 bytes uncompressed. `max_compressed_len` is 32 + 84 + 84/6 = 130. + let status_message_bytes = StatusMessage { + fork_digest: [0; 4], + finalized_root: Hash256::from_low_u64_be(0), + finalized_epoch: Epoch::new(1), + head_root: Hash256::from_low_u64_be(0), + head_slot: Slot::new(1), + } + .as_ssz_bytes(); + + assert_eq!(status_message_bytes.len(), 84); + assert_eq!(snap::raw::max_compress_len(status_message_bytes.len()), 130); + + let mut uvi_codec: Uvi = Uvi::default(); + let mut dst = BytesMut::with_capacity(1024); + + // Insert length-prefix + uvi_codec + .encode(status_message_bytes.len(), &mut dst) + .unwrap(); + + // Insert snappy stream identifier + dst.extend_from_slice(stream_identifier); + + // Insert malicious padding of 80 bytes. + for _ in 0..20 { + dst.extend_from_slice(malicious_padding); + } + + // Insert payload (42 bytes compressed) + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&status_message_bytes).unwrap(); + writer.flush().unwrap(); + assert_eq!(writer.get_ref().len(), 42); + dst.extend_from_slice(writer.get_ref()); + + // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. + assert_eq!( + decode(Protocol::Status, Version::V1, &mut dst).unwrap_err(), + RPCError::InvalidData + ); + } + + /// Test a malicious snappy encoding for a V2 `BlocksByRange` message where the attacker + /// sends a valid message filled with a stream of useless padding before the actual message. 
+ #[test] + fn test_decode_malicious_v2_message() { + let fork_context = Arc::new(fork_context()); + + // 10 byte snappy stream identifier + let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; + + assert_eq!(stream_identifier.len(), 10); + + // byte 0(0xFE) is padding chunk type identifier for snappy messages + // byte 1,2,3 are chunk length (little endian) + let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; + + // Full altair block is 157916 bytes uncompressed. `max_compressed_len` is 32 + 157916 + 157916/6 = 184267. + let block_message_bytes = altair_block().as_ssz_bytes(); + + assert_eq!(block_message_bytes.len(), 157916); + assert_eq!( + snap::raw::max_compress_len(block_message_bytes.len()), + 184267 + ); + + let mut uvi_codec: Uvi = Uvi::default(); + let mut dst = BytesMut::with_capacity(1024); + + // Insert context bytes + dst.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + + // Insert length-prefix + uvi_codec + .encode(block_message_bytes.len(), &mut dst) + .unwrap(); + + // Insert snappy stream identifier + dst.extend_from_slice(stream_identifier); + + // Insert malicious padding of 176156 bytes. + for _ in 0..44039 { + dst.extend_from_slice(malicious_padding); + } + + // Insert payload (8103 bytes compressed) + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&block_message_bytes).unwrap(); + writer.flush().unwrap(); + assert_eq!(writer.get_ref().len(), 8103); + dst.extend_from_slice(writer.get_ref()); + + // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. + assert_eq!( + decode(Protocol::BlocksByRange, Version::V2, &mut dst).unwrap_err(), + RPCError::InvalidData + ); + } +} diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index 554e6787f83..506093ee6cb 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -4,6 +4,7 @@ use super::methods::{ GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination, }; +use super::outbound::OutboundRequestContainer; use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; @@ -23,12 +24,13 @@ use smallvec::SmallVec; use std::{ collections::hash_map::Entry, pin::Pin, + sync::Arc, task::{Context, Poll}, time::Duration, }; use tokio::time::{sleep_until, Instant as TInstant, Sleep}; use tokio_util::time::{delay_queue, DelayQueue}; -use types::EthSpec; +use types::{EthSpec, ForkContext}; /// The time (in seconds) before a substream that is awaiting a response from the user times out. pub const RESPONSE_TIMEOUT: u64 = 10; @@ -126,6 +128,9 @@ where /// This keeps track of the number of attempts. outbound_io_error_retries: u8, + /// Fork specific info. 
+ fork_context: Arc, + /// Logger for handling RPC streams log: slog::Logger, } @@ -203,6 +208,7 @@ where { pub fn new( listen_protocol: SubstreamProtocol, ()>, + fork_context: Arc, log: &slog::Logger, ) -> Self { RPCHandler { @@ -219,6 +225,7 @@ where state: HandlerState::Active, max_dial_negotiated: 8, outbound_io_error_retries: 0, + fork_context, log: log.clone(), } } @@ -308,7 +315,7 @@ where type OutEvent = HandlerEvent; type Error = RPCError; type InboundProtocol = RPCProtocol; - type OutboundProtocol = OutboundRequest; + type OutboundProtocol = OutboundRequestContainer; type OutboundOpenInfo = (RequestId, OutboundRequest); // Keep track of the id and the request type InboundOpenInfo = (); @@ -874,7 +881,14 @@ where let (id, req) = self.dial_queue.remove(0); self.dial_queue.shrink_to_fit(); return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(req.clone(), ()).map_info(|()| (id, req)), + protocol: SubstreamProtocol::new( + OutboundRequestContainer { + req: req.clone(), + fork_context: self.fork_context.clone(), + }, + (), + ) + .map_info(|()| (id, req)), }); } diff --git a/beacon_node/eth2_libp2p/src/rpc/methods.rs b/beacon_node/eth2_libp2p/src/rpc/methods.rs index e24b6e980b2..b2be196474d 100644 --- a/beacon_node/eth2_libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2_libp2p/src/rpc/methods.rs @@ -1,6 +1,6 @@ //! Available RPC methods types and ids. -use crate::types::EnrBitfield; +use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use regex::bytes::Regex; use serde::Serialize; use ssz_derive::{Decode, Encode}; @@ -10,6 +10,7 @@ use ssz_types::{ }; use std::ops::Deref; use strum::AsStaticStr; +use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Maximum number of blocks in a single request. @@ -93,13 +94,23 @@ pub struct Ping { } /// The METADATA response structure. -#[derive(Encode, Decode, Clone, Debug, PartialEq, Serialize)] +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Encode, Decode, Clone, Debug, PartialEq, Serialize), + serde(bound = "T: EthSpec", deny_unknown_fields), + ) +)] +#[derive(Clone, Debug, PartialEq, Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct MetaData { /// A sequential counter indicating when data gets modified. pub seq_number: u64, - /// The persistent subnet bitfield. - pub attnets: EnrBitfield, + /// The persistent attestation subnet bitfield. + pub attnets: EnrAttestationBitfield, + /// The persistent sync committee bitfield. + #[superstruct(only(V2))] + pub syncnets: EnrSyncCommitteeBitfield, } /// The reason given for a `Goodbye` message. 
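A useful way to read the `superstruct` definition above is at the SSZ level: `seq_number` and both bitfields are fixed-size, so a V2 METADATA payload is exactly the V1 payload with the syncnets bitvector appended. That is what makes the codec's version juggling cheap in both directions: drop syncnets to answer a V1 request, or append an empty bitfield to upgrade V1 data. A back-of-the-envelope sketch follows; the 8-byte and 1-byte bitfield sizes assume the mainnet presets of 64 attestation subnets and 4 sync committee subnets, and the function names are illustrative only:

    // SSZ layout sketch: uint64 is 8 bytes little-endian; a Bitvector[N]
    // occupies ceil(N / 8) bytes with no length prefix.
    fn encode_metadata_v1(seq_number: u64, attnets: [u8; 8]) -> Vec<u8> {
        let mut out = seq_number.to_le_bytes().to_vec();
        out.extend_from_slice(&attnets);
        out
    }

    fn encode_metadata_v2(seq_number: u64, attnets: [u8; 8], syncnets: u8) -> Vec<u8> {
        // V2 is V1 with the syncnets bitvector appended.
        let mut out = encode_metadata_v1(seq_number, attnets);
        out.push(syncnets);
        out
    }

    fn main() {
        assert_eq!(encode_metadata_v1(1, [0; 8]).len(), 16);
        assert_eq!(encode_metadata_v2(1, [0; 8], 0).len(), 17);
    }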
@@ -360,7 +371,7 @@ impl std::fmt::Display for RPCResponse { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), - RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number), + RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), } } } diff --git a/beacon_node/eth2_libp2p/src/rpc/mod.rs b/beacon_node/eth2_libp2p/src/rpc/mod.rs index 702e3e20dfe..96fa23506cd 100644 --- a/beacon_node/eth2_libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2_libp2p/src/rpc/mod.rs @@ -15,12 +15,13 @@ use libp2p::{Multiaddr, PeerId}; use rate_limiter::{RPCRateLimiter as RateLimiter, RPCRateLimiterBuilder, RateLimitedErr}; use slog::{crit, debug, o}; use std::marker::PhantomData; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; -use types::EthSpec; +use types::{EthSpec, ForkContext}; pub(crate) use handler::HandlerErr; -pub(crate) use methods::{MetaData, Ping, RPCCodedResponse, RPCResponse}; +pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse}; pub(crate) use protocol::{InboundRequest, RPCProtocol}; pub use handler::SubstreamId; @@ -101,12 +102,13 @@ pub struct RPC { limiter: RateLimiter, /// Queue of events to be processed. events: Vec, RPCMessage>>, + fork_context: Arc, /// Slog logger for RPC behaviour. log: slog::Logger, } impl RPC { - pub fn new(log: slog::Logger) -> Self { + pub fn new(fork_context: Arc, log: slog::Logger) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); let limiter = RPCRateLimiterBuilder::new() .n_every(Protocol::MetaData, 2, Duration::from_secs(5)) @@ -124,6 +126,7 @@ impl RPC { RPC { limiter, events: Vec::new(), + fork_context, log, } } @@ -182,10 +185,12 @@ where RPCHandler::new( SubstreamProtocol::new( RPCProtocol { + fork_context: self.fork_context.clone(), phantom: PhantomData, }, (), ), + self.fork_context.clone(), &self.log, ) } diff --git a/beacon_node/eth2_libp2p/src/rpc/outbound.rs b/beacon_node/eth2_libp2p/src/rpc/outbound.rs index b9dbd08b523..89112553419 100644 --- a/beacon_node/eth2_libp2p/src/rpc/outbound.rs +++ b/beacon_node/eth2_libp2p/src/rpc/outbound.rs @@ -14,16 +14,23 @@ use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, SinkExt}; use libp2p::core::{OutboundUpgrade, UpgradeInfo}; +use std::sync::Arc; use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; -use types::EthSpec; +use types::{EthSpec, ForkContext}; /* Outbound request */ // Combines all the RPC requests into a single enum to implement `UpgradeInfo` and // `OutboundUpgrade` +#[derive(Debug, Clone)] +pub struct OutboundRequestContainer { + pub req: OutboundRequest, + pub fork_context: Arc, +} + #[derive(Debug, Clone, PartialEq)] pub enum OutboundRequest { Status(StatusMessage), @@ -34,13 +41,13 @@ pub enum OutboundRequest { MetaData(PhantomData), } -impl UpgradeInfo for OutboundRequest { +impl UpgradeInfo for OutboundRequestContainer { type Info = ProtocolId; type InfoIter = Vec; // add further protocols as we support more encodings/versions fn protocol_info(&self) -> Self::InfoIter { - self.supported_protocols() + self.req.supported_protocols() } } @@ -59,26 +66,23 @@ impl OutboundRequest { Version::V1, Encoding::SSZSnappy, )], - OutboundRequest::BlocksByRange(_) => vec![ProtocolId::new( - Protocol::BlocksByRange, - Version::V1, - Encoding::SSZSnappy, - )], - OutboundRequest::BlocksByRoot(_) => vec![ProtocolId::new( - 
Protocol::BlocksByRoot, - Version::V1, - Encoding::SSZSnappy, - )], + OutboundRequest::BlocksByRange(_) => vec![ + ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ], + OutboundRequest::BlocksByRoot(_) => vec![ + ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ], OutboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, Encoding::SSZSnappy, )], - OutboundRequest::MetaData(_) => vec![ProtocolId::new( - Protocol::MetaData, - Version::V1, - Encoding::SSZSnappy, - )], + OutboundRequest::MetaData(_) => vec![ + ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ], } } @@ -130,7 +134,7 @@ impl OutboundRequest { pub type OutboundFramed<TSocket, TSpec> = Framed<Compat<TSocket>, OutboundCodec<TSpec>>; -impl<TSocket, TSpec> OutboundUpgrade<TSocket> for OutboundRequest<TSpec> +impl<TSocket, TSpec> OutboundUpgrade<TSocket> for OutboundRequestContainer<TSpec> where TSpec: EthSpec + Send + 'static, TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, @@ -147,6 +151,7 @@ where let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new( protocol, usize::max_value(), + self.fork_context.clone(), )); OutboundCodec::SSZSnappy(ssz_snappy_codec) } @@ -155,7 +160,7 @@ where let mut socket = Framed::new(socket, codec); async { - socket.send(self).await?; + socket.send(self.req).await?; socket.close().await?; Ok(socket) } diff --git a/beacon_node/eth2_libp2p/src/rpc/protocol.rs b/beacon_node/eth2_libp2p/src/rpc/protocol.rs index 031246ba16c..b85e48d3134 100644 --- a/beacon_node/eth2_libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2_libp2p/src/rpc/protocol.rs @@ -12,6 +12,7 @@ use ssz::Encode; use ssz_types::VariableList; use std::io; use std::marker::PhantomData; +use std::sync::Arc; use std::time::Duration; use strum::{AsStaticRef, AsStaticStr}; use tokio_io_timeout::TimeoutStream; @@ -19,19 +20,35 @@ use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; -use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock}; +use types::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, EthSpec, ForkContext, Hash256, MainnetEthSpec, + Signature, SignedBeaconBlock, +}; lazy_static! { // Note: It's okay to hardcode the `EthSpec` type for `SignedBeaconBlock` here because the // min/max values are the same across different `EthSpec` implementations.
- pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( - BeaconBlock::empty(&MainnetEthSpec::default_spec()), + pub static ref SIGNED_BEACON_BLOCK_BASE_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Base(BeaconBlockBase::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + pub static ref SIGNED_BEACON_BLOCK_BASE_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Base(BeaconBlockBase::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + + pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MIN: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Altair(BeaconBlockAltair::<MainnetEthSpec>::empty(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() .len(); - pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( - BeaconBlock::full(&MainnetEthSpec::default_spec()), + pub static ref SIGNED_BEACON_BLOCK_ALTAIR_MAX: usize = SignedBeaconBlock::<MainnetEthSpec>::from_block( + BeaconBlock::Altair(BeaconBlockAltair::full(&MainnetEthSpec::default_spec())), Signature::empty(), ) .as_ssz_bytes() @@ -95,6 +112,8 @@ pub enum Protocol { pub enum Version { /// Version 1 of RPC V1, + /// Version 2 of RPC + V2, } /// RPC Encodings supported. @@ -130,6 +149,7 @@ impl std::fmt::Display for Version { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { Version::V1 => "1", + Version::V2 => "2", }; f.write_str(repr) } @@ -137,6 +157,7 @@ impl std::fmt::Display for Version { #[derive(Debug, Clone)] pub struct RPCProtocol { + pub fork_context: Arc<ForkContext>, pub phantom: PhantomData<TSpec>, } @@ -149,9 +170,13 @@ impl UpgradeInfo for RPCProtocol { vec![ ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), + // V2 variants have higher preference than V1 + ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), ] } @@ -226,22 +251,49 @@ impl ProtocolId { <StatusMessage as Encode>::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response - Protocol::BlocksByRange => { - RpcLimits::new(*SIGNED_BEACON_BLOCK_MIN, *SIGNED_BEACON_BLOCK_MAX) - } - Protocol::BlocksByRoot => { - RpcLimits::new(*SIGNED_BEACON_BLOCK_MIN, *SIGNED_BEACON_BLOCK_MAX) - } + Protocol::BlocksByRange => RpcLimits::new( + std::cmp::min( + *SIGNED_BEACON_BLOCK_ALTAIR_MIN, + *SIGNED_BEACON_BLOCK_BASE_MIN, + ), + std::cmp::max( + *SIGNED_BEACON_BLOCK_ALTAIR_MAX, + *SIGNED_BEACON_BLOCK_BASE_MAX, + ), + ), + Protocol::BlocksByRoot => RpcLimits::new( + std::cmp::min( + *SIGNED_BEACON_BLOCK_ALTAIR_MIN, + *SIGNED_BEACON_BLOCK_BASE_MIN, + ), + std::cmp::max( + *SIGNED_BEACON_BLOCK_ALTAIR_MAX, + *SIGNED_BEACON_BLOCK_BASE_MAX, + ), + ), + Protocol::Ping => RpcLimits::new( <Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(), ), Protocol::MetaData => RpcLimits::new( - <MetaData<T> as Encode>::ssz_fixed_len(), - <MetaData<T> as Encode>::ssz_fixed_len(), + <MetaDataV1<T> as Encode>::ssz_fixed_len(), + <MetaDataV2<T> as Encode>::ssz_fixed_len(), ), } } + + /// Returns `true` if the given `ProtocolId` should expect
`context_bytes` in the + /// beginning of the stream, else returns `false`. + pub fn has_context_bytes(&self) -> bool { + if self.version == Version::V2 { + match self.message_name { + Protocol::BlocksByRange | Protocol::BlocksByRoot => return true, + _ => return false, + } + } + false + } } /// An RPC protocol ID. @@ -292,8 +344,11 @@ where let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { - let ssz_snappy_codec = - BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE)); + let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new( + protocol, + MAX_RPC_SIZE, + self.fork_context.clone(), + )); InboundCodec::SSZSnappy(ssz_snappy_codec) } }; @@ -359,26 +414,25 @@ impl InboundRequest { Version::V1, Encoding::SSZSnappy, )], - InboundRequest::BlocksByRange(_) => vec![ProtocolId::new( - Protocol::BlocksByRange, - Version::V1, - Encoding::SSZSnappy, - )], - InboundRequest::BlocksByRoot(_) => vec![ProtocolId::new( - Protocol::BlocksByRoot, - Version::V1, - Encoding::SSZSnappy, - )], + InboundRequest::BlocksByRange(_) => vec![ + // V2 has higher preference when negotiating a stream + ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ], + InboundRequest::BlocksByRoot(_) => vec![ + // V2 has higher preference when negotiating a stream + ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ], InboundRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, Encoding::SSZSnappy, )], - InboundRequest::MetaData(_) => vec![ProtocolId::new( - Protocol::MetaData, - Version::V1, - Encoding::SSZSnappy, - )], + InboundRequest::MetaData(_) => vec![ + ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ], } } @@ -424,8 +478,6 @@ impl InboundRequest { } } -/* RPC Response type - used for outbound upgrades */ - /// Error in RPC Encoding/Decoding. 
#[derive(Debug, Clone, PartialEq, AsStaticStr)] #[strum(serialize_all = "snake_case")] diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index f19e6ffe6cc..c5291bbdf50 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -3,8 +3,10 @@ use crate::behaviour::{ }; use crate::discovery::enr; use crate::multiaddr::Protocol; -use crate::rpc::{GoodbyeReason, MetaData, RPCResponseErrorCode, RequestId}; -use crate::types::{error, EnrBitfield, GossipKind}; +use crate::rpc::{ + GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, RequestId, +}; +use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind}; use crate::EnrExt; use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; use futures::prelude::*; @@ -25,7 +27,7 @@ use std::io::prelude::*; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec}; +use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR}; @@ -66,6 +68,7 @@ impl Service { config: &NetworkConfig, enr_fork_id: EnrForkId, log: &Logger, + fork_context: Arc, chain_spec: &ChainSpec, ) -> error::Result<(Arc>, Self)> { let log = log.new(o!("service"=> "libp2p")); @@ -112,9 +115,10 @@ impl Service { // Lighthouse network behaviour let behaviour = Behaviour::new( &local_keypair, - config, + config.clone(), network_globals.clone(), &log, + fork_context, chain_spec, ) .await?; @@ -547,37 +551,57 @@ fn load_or_build_metadata( network_dir: &std::path::Path, log: &slog::Logger, ) -> MetaData { - // Default metadata - let mut meta_data = MetaData { + // We load a V2 metadata version by default (regardless of current fork) + // since a V2 metadata can be converted to V1. The RPC encoder is responsible + // for sending the correct metadata version based on the negotiated protocol version. 
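To make the comment above concrete, here is a sketch of the encoder-side decision it refers to. `metadata_for_peer` is a hypothetical helper (the PR's real logic lives in the SSZ-snappy codec), and it reuses the simplified `MetaDataV1`/`MetaDataV2` types from the earlier sketch:

    #[derive(Clone, Copy, PartialEq)]
    pub enum Version {
        V1,
        V2,
    }

    /// Choose the wire representation from the negotiated METADATA protocol
    /// version; the node keeps V2 state internally either way.
    pub fn metadata_for_peer(negotiated: Version, md: MetaDataV2) -> MetaData {
        match negotiated {
            // V1 peers never negotiated `syncnets`, so the field is dropped.
            Version::V1 => MetaData::V1(MetaDataV1 {
                seq_number: md.seq_number,
                attnets: md.attnets,
            }),
            Version::V2 => MetaData::V2(md),
        }
    }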
+ let mut meta_data = MetaDataV2 { seq_number: 0, - attnets: EnrBitfield::<E>::default(), + attnets: EnrAttestationBitfield::<E>::default(), + syncnets: EnrSyncCommitteeBitfield::<E>::default(), }; // Read metadata from persisted file if available let metadata_path = network_dir.join(METADATA_FILENAME); if let Ok(mut metadata_file) = File::open(metadata_path) { let mut metadata_ssz = Vec::new(); if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { - match MetaData::<E>::from_ssz_bytes(&metadata_ssz) { + // Attempt to read a MetaDataV2 version from the persisted file, + // if that fails, read MetaDataV1 + match MetaDataV2::<E>::from_ssz_bytes(&metadata_ssz) { Ok(persisted_metadata) => { meta_data.seq_number = persisted_metadata.seq_number; // Increment seq number if the persisted bitfields are not default - if persisted_metadata.attnets != meta_data.attnets { + if persisted_metadata.attnets != meta_data.attnets + || persisted_metadata.syncnets != meta_data.syncnets + { meta_data.seq_number += 1; } debug!(log, "Loaded metadata from disk"); } - Err(e) => { - debug!( - log, - "Metadata from file could not be decoded"; - "error" => ?e, - ); + Err(_) => { + match MetaDataV1::<E>::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + let persisted_metadata = MetaData::V1(persisted_metadata); + // Increment seq number as the persisted metadata version is updated + meta_data.seq_number = *persisted_metadata.seq_number() + 1; + debug!(log, "Loaded metadata from disk"); + } + Err(e) => { + debug!( + log, + "Metadata from file could not be decoded"; + "error" => ?e, + ); + } + } } } } }; - debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number); + // Wrap the metadata in the versioned enum + let meta_data = MetaData::V2(meta_data); + + debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); save_metadata_to_disk(network_dir, meta_data.clone(), log); meta_data } diff --git a/beacon_node/eth2_libp2p/src/types/mod.rs b/beacon_node/eth2_libp2p/src/types/mod.rs index 156e6a1d782..1d045bb38d6 100644 --- a/beacon_node/eth2_libp2p/src/types/mod.rs +++ b/beacon_node/eth2_libp2p/src/types/mod.rs @@ -7,13 +7,13 @@ mod topics; use types::{BitVector, EthSpec}; -#[allow(type_alias_bounds)] -pub type EnrBitfield<T: EthSpec> = BitVector<T::SubnetBitfieldLength>; +pub type EnrAttestationBitfield<T> = BitVector<<T as EthSpec>::SubnetBitfieldLength>; +pub type EnrSyncCommitteeBitfield<T> = BitVector<<T as EthSpec>::SyncCommitteeSubnetCount>; pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>; pub use globals::NetworkGlobals; pub use pubsub::{PubsubMessage, SnappyTransform}; -pub use subnet::SubnetDiscovery; +pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::SyncState; -pub use topics::{subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; +pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; diff --git a/beacon_node/eth2_libp2p/src/types/pubsub.rs b/beacon_node/eth2_libp2p/src/types/pubsub.rs index f1ba987058a..75ef6e8ab26 100644 --- a/beacon_node/eth2_libp2p/src/types/pubsub.rs +++ b/beacon_node/eth2_libp2p/src/types/pubsub.rs @@ -7,10 +7,10 @@ use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; -use types::SubnetId; use types::{ - Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockBase, SignedVoluntaryExit, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair,
SignedBeaconBlockBase, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -27,6 +27,10 @@ pub enum PubsubMessage { ProposerSlashing(Box), /// Gossipsub message providing notification of a new attester slashing. AttesterSlashing(Box>), + /// Gossipsub message providing notification of partially aggregated sync committee signatures. + SignedContributionAndProof(Box>), + /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id. + SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), } // Implements the `DataTransform` trait of gossipsub to employ snappy compression @@ -107,6 +111,8 @@ impl PubsubMessage { PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit, PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing, PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, + PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, + PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), } } @@ -114,7 +120,11 @@ impl PubsubMessage { /* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will * need to be modified. */ - pub fn decode(topic: &TopicHash, data: &[u8]) -> Result { + pub fn decode( + topic: &TopicHash, + data: &[u8], + fork_context: &ForkContext, + ) -> Result { match GossipTopic::decode(topic.as_str()) { Err(_) => Err(format!("Unknown gossipsub topic: {:?}", topic)), Ok(gossip_topic) => { @@ -141,11 +151,23 @@ impl PubsubMessage { )))) } GossipKind::BeaconBlock => { - // FIXME(altair): support Altair blocks - let beacon_block = SignedBeaconBlock::Base( - SignedBeaconBlockBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ); + let beacon_block = + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(ForkName::Base) => SignedBeaconBlock::::Base( + SignedBeaconBlockBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + Some(ForkName::Altair) => SignedBeaconBlock::::Altair( + SignedBeaconBlockAltair::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) } GossipKind::VoluntaryExit => { @@ -163,6 +185,21 @@ impl PubsubMessage { .map_err(|e| format!("{:?}", e))?; Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing))) } + GossipKind::SignedContributionAndProof => { + let sync_aggregate = SignedContributionAndProof::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::SignedContributionAndProof(Box::new( + sync_aggregate, + ))) + } + GossipKind::SyncCommitteeMessage(subnet_id) => { + let sync_committee = SyncCommitteeMessage::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::SyncCommitteeMessage(Box::new(( + *subnet_id, + sync_committee, + )))) + } } } } @@ -182,6 +219,8 @@ impl PubsubMessage { PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(), PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), + PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), + PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), } } } @@ -210,6 +249,12 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::VoluntaryExit(_data) => write!(f, 
"Voluntary Exit"), PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), PubsubMessage::AttesterSlashing(_data) => write!(f, "Attester Slashing"), + PubsubMessage::SignedContributionAndProof(_) => { + write!(f, "Signed Contribution and Proof") + } + PubsubMessage::SyncCommitteeMessage(data) => { + write!(f, "Sync committee message: subnet_id: {}", *data.0) + } } } } diff --git a/beacon_node/eth2_libp2p/src/types/subnet.rs b/beacon_node/eth2_libp2p/src/types/subnet.rs index 847a63b60d0..50d28542bec 100644 --- a/beacon_node/eth2_libp2p/src/types/subnet.rs +++ b/beacon_node/eth2_libp2p/src/types/subnet.rs @@ -1,9 +1,28 @@ +use serde::Serialize; use std::time::Instant; -use types::SubnetId; +use types::{SubnetId, SyncSubnetId}; + +/// Represents a subnet on an attestation or sync committee `SubnetId`. +/// +/// Used for subscribing to the appropriate gossipsub subnets and mark +/// appropriate metadata bitfields. +#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq, Hash)] +pub enum Subnet { + /// Represents a gossipsub attestation subnet and the metadata `attnets` field. + Attestation(SubnetId), + /// Represents a gossipsub sync committee subnet and the metadata `syncnets` field. + SyncCommittee(SyncSubnetId), +} /// A subnet to discover peers on along with the instant after which it's no longer useful. #[derive(Debug, Clone)] pub struct SubnetDiscovery { - pub subnet_id: SubnetId, + pub subnet: Subnet, pub min_ttl: Option, } + +impl PartialEq for SubnetDiscovery { + fn eq(&self, other: &SubnetDiscovery) -> bool { + self.subnet.eq(&other.subnet) + } +} diff --git a/beacon_node/eth2_libp2p/src/types/topics.rs b/beacon_node/eth2_libp2p/src/types/topics.rs index 6bacfcf383c..f9860a003f5 100644 --- a/beacon_node/eth2_libp2p/src/types/topics.rs +++ b/beacon_node/eth2_libp2p/src/types/topics.rs @@ -1,7 +1,9 @@ use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; use serde_derive::{Deserialize, Serialize}; use strum::AsRefStr; -use types::SubnetId; +use types::{SubnetId, SyncSubnetId}; + +use crate::Subnet; /// The gossipsub topic names. // These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX @@ -14,13 +16,16 @@ pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; +pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; +pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; -pub const CORE_TOPICS: [GossipKind; 5] = [ +pub const CORE_TOPICS: [GossipKind; 6] = [ GossipKind::BeaconBlock, GossipKind::BeaconAggregateAndProof, GossipKind::VoluntaryExit, GossipKind::ProposerSlashing, GossipKind::AttesterSlashing, + GossipKind::SignedContributionAndProof, ]; /// A gossipsub topic which encapsulates the type of messages that should be sent and received over @@ -30,7 +35,7 @@ pub struct GossipTopic { /// The encoding of the topic. encoding: GossipEncoding, /// The fork digest of the topic, - fork_digest: [u8; 4], + pub fork_digest: [u8; 4], /// The kind of topic. kind: GossipKind, } @@ -53,12 +58,20 @@ pub enum GossipKind { ProposerSlashing, /// Topic for publishing attester slashings. AttesterSlashing, + /// Topic for publishing partially aggregated sync committee signatures. + SignedContributionAndProof, + /// Topic for publishing unaggregated sync committee signatures on a particular subnet. 
+ #[strum(serialize = "sync_committee")] + SyncCommitteeMessage(SyncSubnetId), } impl std::fmt::Display for GossipKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { GossipKind::Attestation(subnet_id) => write!(f, "beacon_attestation_{}", **subnet_id), + GossipKind::SyncCommitteeMessage(subnet_id) => { + write!(f, "sync_committee_{}", **subnet_id) + } x => f.write_str(x.as_ref()), } } @@ -124,11 +137,15 @@ impl GossipTopic { let kind = match topic_parts[3] { BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock, BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof, + SIGNED_CONTRIBUTION_AND_PROOF_TOPIC => GossipKind::SignedContributionAndProof, VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, topic => match committee_topic_index(topic) { - Some(subnet_id) => GossipKind::Attestation(subnet_id), + Some(subnet) => match subnet { + Subnet::Attestation(s) => GossipKind::Attestation(s), + Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), + }, None => return Err(format!("Unknown topic: {}", topic)), }, }; @@ -163,6 +180,10 @@ impl From for String { GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), + GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), + GossipKind::SyncCommitteeMessage(index) => { + format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) + } }; format!( "/{}/{}/{}/{}", @@ -174,32 +195,72 @@ impl From for String { } } -impl From for GossipKind { - fn from(subnet_id: SubnetId) -> Self { - GossipKind::Attestation(subnet_id) +impl std::fmt::Display for GossipTopic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let encoding = match self.encoding { + GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, + }; + + let kind = match self.kind { + GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), + GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), + GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), + GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), + GossipKind::Attestation(index) => format!("{}{}", BEACON_ATTESTATION_PREFIX, *index,), + GossipKind::SignedContributionAndProof => SIGNED_CONTRIBUTION_AND_PROOF_TOPIC.into(), + GossipKind::SyncCommitteeMessage(index) => { + format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) + } + }; + write!( + f, + "/{}/{}/{}/{}", + TOPIC_PREFIX, + hex::encode(self.fork_digest), + kind, + encoding + ) + } +} + +impl From for GossipKind { + fn from(subnet_id: Subnet) -> Self { + match subnet_id { + Subnet::Attestation(s) => GossipKind::Attestation(s), + Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), + } } } // helper functions /// Get subnet id from an attestation subnet topic hash. 
-pub fn subnet_id_from_topic_hash(topic_hash: &TopicHash) -> Option { +pub fn subnet_from_topic_hash(topic_hash: &TopicHash) -> Option { let gossip_topic = GossipTopic::decode(topic_hash.as_str()).ok()?; - if let GossipKind::Attestation(subnet_id) = gossip_topic.kind() { - return Some(*subnet_id); + match gossip_topic.kind() { + GossipKind::Attestation(subnet_id) => Some(Subnet::Attestation(*subnet_id)), + GossipKind::SyncCommitteeMessage(subnet_id) => Some(Subnet::SyncCommittee(*subnet_id)), + _ => None, } - None } -// Determines if a string is a committee topic. -fn committee_topic_index(topic: &str) -> Option { +// Determines if a string is an attestation or sync committee topic. +fn committee_topic_index(topic: &str) -> Option { if topic.starts_with(BEACON_ATTESTATION_PREFIX) { - return Some(SubnetId::new( + return Some(Subnet::Attestation(SubnetId::new( topic .trim_start_matches(BEACON_ATTESTATION_PREFIX) .parse::() .ok()?, - )); + ))); + } else if topic.starts_with(SYNC_COMMITTEE_PREFIX_TOPIC) { + return Some(Subnet::SyncCommittee(SyncSubnetId::new( + topic + .trim_start_matches(SYNC_COMMITTEE_PREFIX_TOPIC) + .parse::() + .ok()?, + ))); } None } @@ -222,7 +283,9 @@ mod tests { for kind in [ BeaconBlock, BeaconAggregateAndProof, + SignedContributionAndProof, Attestation(SubnetId::new(42)), + SyncCommitteeMessage(SyncSubnetId::new(42)), VoluntaryExit, ProposerSlashing, AttesterSlashing, @@ -292,14 +355,20 @@ mod tests { } #[test] - fn test_subnet_id_from_topic_hash() { + fn test_subnet_from_topic_hash() { let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/beacon_block/ssz_snappy"); - assert!(subnet_id_from_topic_hash(&topic_hash).is_none()); + assert!(subnet_from_topic_hash(&topic_hash).is_none()); let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/beacon_attestation_42/ssz_snappy"); assert_eq!( - subnet_id_from_topic_hash(&topic_hash), - Some(SubnetId::new(42)) + subnet_from_topic_hash(&topic_hash), + Some(Subnet::Attestation(SubnetId::new(42))) + ); + + let topic_hash = TopicHash::from_raw("/eth2/e1925f3b/sync_committee_42/ssz_snappy"); + assert_eq!( + subnet_from_topic_hash(&topic_hash), + Some(Subnet::SyncCommittee(SyncSubnetId::new(42))) ); } @@ -314,6 +383,11 @@ mod tests { "beacon_attestation", Attestation(SubnetId::new(42)).as_ref() ); + + assert_eq!( + "sync_committee", + SyncCommitteeMessage(SyncSubnetId::new(42)).as_ref() + ); assert_eq!("voluntary_exit", VoluntaryExit.as_ref()); assert_eq!("proposer_slashing", ProposerSlashing.as_ref()); assert_eq!("attester_slashing", AttesterSlashing.as_ref()); diff --git a/beacon_node/eth2_libp2p/tests/common/mod.rs b/beacon_node/eth2_libp2p/tests/common/mod.rs index 1f60624287d..8c28512d045 100644 --- a/beacon_node/eth2_libp2p/tests/common/mod.rs +++ b/beacon_node/eth2_libp2p/tests/common/mod.rs @@ -7,14 +7,20 @@ use eth2_libp2p::{Libp2pEvent, NetworkConfig}; use libp2p::gossipsub::GossipsubConfigBuilder; use slog::{debug, error, o, Drain}; use std::net::{TcpListener, UdpSocket}; +use std::sync::Arc; use std::sync::Weak; use std::time::Duration; use tokio::runtime::Runtime; -use types::{ChainSpec, EnrForkId, MinimalEthSpec}; +use types::{ChainSpec, EnrForkId, ForkContext, Hash256, MinimalEthSpec}; type E = MinimalEthSpec; use tempfile::Builder as TempBuilder; +/// Returns a dummy fork context +fn fork_context() -> ForkContext { + ForkContext::new::(types::Slot::new(0), Hash256::zero(), &ChainSpec::minimal()) +} + pub struct Libp2pInstance(LibP2PService, exit_future::Signal); impl std::ops::Deref for Libp2pInstance { @@ -109,12 
+115,14 @@ pub async fn build_libp2p_instance( let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); + let fork_context = Arc::new(fork_context()); Libp2pInstance( LibP2PService::new( executor, &config, EnrForkId::default(), &log, + fork_context, &ChainSpec::minimal(), ) .await diff --git a/beacon_node/eth2_libp2p/tests/rpc_tests.rs b/beacon_node/eth2_libp2p/tests/rpc_tests.rs index d621bf31cc8..9d1faf748cf 100644 --- a/beacon_node/eth2_libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2_libp2p/tests/rpc_tests.rs @@ -8,7 +8,8 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, Epoch, EthSpec, Hash256, MinimalEthSpec, + Signature, SignedBeaconBlock, Slot, }; mod common; @@ -500,9 +501,13 @@ fn test_blocks_by_root_chunked_rpc() { }); // BlocksByRoot Response - let full_block = BeaconBlock::full(&spec); + let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + + let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); + let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -525,7 +530,11 @@ fn test_blocks_by_root_chunked_rpc() { response, }) => match response { Response::BlocksByRoot(Some(_)) => { - assert_eq!(response, rpc_response.clone()); + if messages_received < 5 { + assert_eq!(response, rpc_response_base.clone()); + } else { + assert_eq!(response, rpc_response_altair.clone()); + } messages_received += 1; debug!(log, "Chunk received"); } @@ -555,11 +564,18 @@ fn test_blocks_by_root_chunked_rpc() { // send the response debug!(log, "Receiver got request"); - for _ in 1..=messages_to_send { + for i in 0..messages_to_send { + // Send first half of responses as base blocks and + // second half as altair blocks. 
+ let rpc_response = if i < 5 { + rpc_response_base.clone() + } else { + rpc_response_altair.clone() + }; receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, - rpc_response.clone(), + rpc_response, ); debug!(log, "Sending message"); } @@ -621,7 +637,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { }); // BlocksByRoot Response - let full_block = BeaconBlock::full(&spec); + let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0f288cfea0c..1e6dffa45f2 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -3,6 +3,7 @@ name = "http_api" version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +autotests = false # using a single test binary compiles faster [dependencies] warp = { git = "https://github.com/paulhauner/warp ", branch = "cors-wildcard" } @@ -34,4 +35,9 @@ futures = "0.3.8" store = { path = "../store" } environment = { path = "../../lighthouse/environment" } tree_hash = "0.1.1" +discv5 = { version = "0.1.0-beta.8", features = ["libp2p"] } sensitive_url = { path = "../../common/sensitive_url" } + +[[test]] +name = "bn_http_api_tests" +path = "tests/main.rs" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 998b95bea98..32a10ed71d8 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -10,7 +10,9 @@ mod block_id; mod metrics; mod proposer_duties; mod state_id; +mod sync_committees; mod validator_inclusion; +mod version; use beacon_chain::{ attestation_verification::SignatureVerifiedAttestation, @@ -20,7 +22,7 @@ use beacon_chain::{ WhenSlotSkipped, }; use block_id::BlockId; -use eth2::types::{self as api_types, ValidatorId}; +use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::NetworkMessage; @@ -37,10 +39,12 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, - ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, - SignedVoluntaryExit, Slot, + Attestation, AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, + EthSpec, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SyncCommitteeMessage, + SyncContributionData, }; +use version::{fork_versioned_response, unsupported_version_rejection, V1}; use warp::http::StatusCode; use warp::sse::Event; use warp::Reply; @@ -48,7 +52,6 @@ use warp::{http::Response, Filter}; use warp_utils::task::{blocking_json_task, blocking_task}; const API_PREFIX: &str = "eth"; -const API_VERSION: &str = "v1"; /// If the node is within this many epochs from the head, we declare it to be synced regardless of /// the network sync state. 
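The hunk above drops the hard-coded `API_VERSION` constant: the version segment becomes a per-request path parameter, so the same filter tree can serve `/eth/v1/...` and `/eth/v2/...`. A standalone sketch of the parsing idea, assuming `EndpointVersion` wraps the numeric part of a segment such as `v1` or `v2` (the real type lives in the `eth2` crate and is extracted by warp via `FromStr`):

    #[derive(Debug, Clone, Copy, PartialEq)]
    pub struct EndpointVersion(pub u64);

    /// Parse the `v1`/`v2` path segment that follows the `/eth` prefix.
    pub fn parse_version(segment: &str) -> Option<EndpointVersion> {
        segment
            .strip_prefix('v')
            .and_then(|n| n.parse().ok())
            .map(EndpointVersion)
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn version_segments() {
            assert_eq!(parse_version("v2"), Some(EndpointVersion(2)));
            // Anything else is rejected by the filter as a bad request.
            assert_eq!(parse_version("x2"), None);
        }
    }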
@@ -152,7 +155,7 @@ pub fn prometheus_metrics() -> warp::filters::log::Log Option<&'static str> { - if info.path() == format!("/{}/{}/{}", API_PREFIX, API_VERSION, s) { + if info.path() == format!("/{}/{}", API_PREFIX, s) { Some(s) } else { None @@ -160,30 +163,30 @@ pub fn prometheus_metrics() -> warp::filters::log::Log Option<&'static str> { - if info - .path() - .starts_with(&format!("/{}/{}/{}", API_PREFIX, API_VERSION, s)) - { + if info.path().starts_with(&format!("/{}/{}", API_PREFIX, s)) { Some(s) } else { None } }; - equals("beacon/blocks") - .or_else(|| starts_with("validator/duties/attester")) - .or_else(|| starts_with("validator/duties/proposer")) - .or_else(|| starts_with("validator/attestation_data")) - .or_else(|| starts_with("validator/blocks")) - .or_else(|| starts_with("validator/aggregate_attestation")) - .or_else(|| starts_with("validator/aggregate_and_proofs")) - .or_else(|| starts_with("validator/beacon_committee_subscriptions")) - .or_else(|| starts_with("beacon/")) - .or_else(|| starts_with("config/")) - .or_else(|| starts_with("debug/")) - .or_else(|| starts_with("events/")) - .or_else(|| starts_with("node/")) - .or_else(|| starts_with("validator/")) + // First line covers `POST /v1/beacon/blocks` only + equals("v1/beacon/blocks") + .or_else(|| starts_with("v1/validator/duties/attester")) + .or_else(|| starts_with("v1/validator/duties/proposer")) + .or_else(|| starts_with("v1/validator/attestation_data")) + .or_else(|| starts_with("v1/validator/blocks")) + .or_else(|| starts_with("v2/validator/blocks")) + .or_else(|| starts_with("v1/validator/aggregate_attestation")) + .or_else(|| starts_with("v1/validator/aggregate_and_proofs")) + .or_else(|| starts_with("v1/validator/beacon_committee_subscriptions")) + .or_else(|| starts_with("v1/beacon/")) + .or_else(|| starts_with("v2/beacon/")) + .or_else(|| starts_with("v1/config/")) + .or_else(|| starts_with("v1/debug/")) + .or_else(|| starts_with("v1/events/")) + .or_else(|| starts_with("v1/node/")) + .or_else(|| starts_with("v1/validator/")) .unwrap_or("other") }; @@ -239,7 +242,29 @@ pub fn serve( )); } - let eth1_v1 = warp::path(API_PREFIX).and(warp::path(API_VERSION)); + // Create a filter that extracts the endpoint version. + let any_version = warp::path(API_PREFIX).and(warp::path::param::().or_else( + |_| async move { + Err(warp_utils::reject::custom_bad_request( + "Invalid version identifier".to_string(), + )) + }, + )); + + // Filter that enforces a single endpoint version and then discards the `EndpointVersion`. + let single_version = |reqd: EndpointVersion| { + any_version + .and_then(move |version| async move { + if version == reqd { + Ok(()) + } else { + Err(unsupported_version_rejection(version)) + } + }) + .untuple_one() + }; + + let eth1_v1 = single_version(V1); // Create a `warp` filter that provides access to the network globals. 
let inner_network_globals = ctx.network_globals.clone(); @@ -659,6 +684,61 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/sync_committees?epoch + let get_beacon_state_sync_committees = beacon_states_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::query::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, + chain: Arc>, + query: api_types::SyncCommitteesQuery| { + blocking_json_task(move || { + let sync_committee = state_id.map_state(&chain, |state| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + state + .get_built_sync_committee(epoch, &chain.spec) + .map(|committee| committee.clone()) + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. } => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} has no sync committee for epoch {}", + current_epoch, epoch + )) + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + }) + })?; + + let validators = chain + .validator_indices(sync_committee.pubkeys.iter()) + .map_err(warp_utils::reject::beacon_chain_error)?; + + let validator_aggregates = validators + .chunks_exact(T::EthSpec::sync_subcommittee_size()) + .map(|indices| api_types::SyncSubcommittee { + indices: indices.to_vec(), + }) + .collect(); + + let response = api_types::SyncCommitteeByValidatorIndices { + validators, + validator_aggregates, + }; + + Ok(api_types::GenericResponse::from(response)) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. Given that @@ -875,23 +955,32 @@ pub fn serve( }, ); - let beacon_blocks_path = eth1_v1 + let block_id_or_err = warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block ID".to_string(), + )) + }); + + let beacon_blocks_path_v1 = eth1_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block ID".to_string(), - )) - })) + .and(block_id_or_err) + .and(chain_filter.clone()); + + let beacon_blocks_path_any = any_version + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(block_id_or_err) .and(chain_filter.clone()); // GET beacon/blocks/{block_id} - let get_beacon_block = beacon_blocks_path + let get_beacon_block = beacon_blocks_path_any .clone() .and(warp::path::end()) .and(warp::header::optional::("accept")) .and_then( - |block_id: BlockId, + |endpoint_version: EndpointVersion, + block_id: BlockId, chain: Arc>, accept_header: Option| { blocking_task(move || { @@ -907,17 +996,18 @@ pub fn serve( e )) }), - _ => Ok( - warp::reply::json(&api_types::GenericResponseRef::from(&block)) - .into_response(), - ), + _ => { + let fork_name = block.fork_name(&chain.spec).ok(); + fork_versioned_response(endpoint_version, fork_name, block) + .map(|res| warp::reply::json(&res).into_response()) + } } }) }, ); // GET beacon/blocks/{block_id}/root - let get_beacon_block_root = beacon_blocks_path + let get_beacon_block_root = beacon_blocks_path_v1 .clone() .and(warp::path("root")) .and(warp::path::end()) @@ -931,7 +1021,7 @@ pub fn serve( }); // GET beacon/blocks/{block_id}/attestations - let get_beacon_block_attestations = beacon_blocks_path + let get_beacon_block_attestations = beacon_blocks_path_v1 .clone() 
.and(warp::path("attestations")) .and(warp::path::end()) @@ -1250,6 +1340,28 @@ pub fn serve( }) }); + // POST beacon/pool/sync_committees + let post_beacon_pool_sync_committees = beacon_pool_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + signatures: Vec, + network_tx: UnboundedSender>, + log: Logger| { + blocking_json_task(move || { + sync_committees::process_sync_committee_signatures( + signatures, network_tx, &chain, log, + )?; + Ok(api_types::GenericResponse::from(())) + }) + }, + ); + /* * config/fork_schedule */ @@ -1307,7 +1419,7 @@ pub fn serve( */ // GET debug/beacon/states/{state_id} - let get_debug_beacon_states = eth1_v1 + let get_debug_beacon_states = any_version .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("states")) @@ -1320,7 +1432,8 @@ pub fn serve( .and(warp::header::optional::("accept")) .and(chain_filter.clone()) .and_then( - |state_id: StateId, + |endpoint_version: EndpointVersion, + state_id: StateId, accept_header: Option, chain: Arc>| { blocking_task(move || match accept_header { @@ -1338,10 +1451,9 @@ pub fn serve( }) } _ => state_id.map_state(&chain, |state| { - Ok( - warp::reply::json(&api_types::GenericResponseRef::from(&state)) - .into_response(), - ) + let fork_name = state.fork_name(&chain.spec).ok(); + let res = fork_versioned_response(endpoint_version, fork_name, &state)?; + Ok(warp::reply::json(&res).into_response()) }), }) }, @@ -1380,23 +1492,27 @@ pub fn serve( let enr = network_globals.local_enr(); let p2p_addresses = enr.multiaddr_p2p_tcp(); let discovery_addresses = enr.multiaddr_p2p_udp(); + let meta_data = network_globals.local_metadata.read(); Ok(api_types::GenericResponse::from(api_types::IdentityData { peer_id: network_globals.local_peer_id().to_base58(), enr, p2p_addresses, discovery_addresses, metadata: api_types::MetaData { - seq_number: network_globals.local_metadata.read().seq_number, + seq_number: *meta_data.seq_number(), attnets: format!( + "0x{}", + hex::encode(meta_data.attnets().clone().into_bytes()), + ), + syncnets: format!( "0x{}", hex::encode( - network_globals - .local_metadata - .read() - .attnets - .clone() + meta_data + .syncnets() + .map(|x| x.clone()) + .unwrap_or_default() .into_bytes() - ), + ) ), }, })) @@ -1655,7 +1771,7 @@ pub fn serve( }); // GET validator/blocks/{slot} - let get_validator_blocks = eth1_v1 + let get_validator_blocks = any_version .and(warp::path("validator")) .and(warp::path("blocks")) .and(warp::path::param::().or_else(|_| async { @@ -1668,7 +1784,10 @@ pub fn serve( .and(warp::query::()) .and(chain_filter.clone()) .and_then( - |slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc>| { + |endpoint_version: EndpointVersion, + slot: Slot, + query: api_types::ValidatorBlocksQuery, + chain: Arc>| { blocking_json_task(move || { let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| { warp_utils::reject::custom_bad_request(format!( @@ -1677,11 +1796,11 @@ pub fn serve( )) })?; - chain + let (block, _) = chain .produce_block(randao_reveal, slot, query.graffiti.map(Into::into)) - .map(|block_and_state| block_and_state.0) - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::block_production_error) + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block.to_ref().fork_name(&chain.spec).ok(); + fork_versioned_response(endpoint_version, fork_name, block) }) }, ); @@ -1766,12 
+1885,57 @@ pub fn serve( }, ); + // POST validator/duties/sync + let post_validator_duties_sync = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("sync")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc>| { + blocking_json_task(move || { + sync_committees::sync_committee_duties(epoch, &indices.0, &chain) + }) + }, + ); + + // GET validator/sync_committee_contribution + let get_validator_sync_committee_contribution = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_contribution")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and_then( + |sync_committee_data: SyncContributionData, chain: Arc>| { + blocking_json_task(move || { + chain + .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map(api_types::GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching sync contribution found".to_string(), + ) + }) + }) + }, + ); + // POST validator/aggregate_and_proofs let post_validator_aggregate_and_proofs = eth1_v1 .and(warp::path("validator")) .and(warp::path("aggregate_and_proofs")) .and(warp::path::end()) - .and(not_while_syncing_filter) + .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) .and(warp::body::json()) .and(network_tx_filter.clone()) @@ -1867,13 +2031,39 @@ pub fn serve( }, ); + let post_validator_contribution_and_proofs = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("contribution_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter) + .and(chain_filter.clone()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |chain: Arc>, + contributions: Vec>, + network_tx: UnboundedSender>, + log: Logger| { + blocking_json_task(move || { + sync_committees::process_signed_contribution_and_proofs( + contributions, + network_tx, + &chain, + log, + )?; + Ok(api_types::GenericResponse::from(())) + }) + }, + ); + // POST validator/beacon_committee_subscriptions let post_validator_beacon_committee_subscriptions = eth1_v1 .and(warp::path("validator")) .and(warp::path("beacon_committee_subscriptions")) .and(warp::path::end()) .and(warp::body::json()) - .and(network_tx_filter) + .and(network_tx_filter.clone()) .and(chain_filter.clone()) .and_then( |subscriptions: Vec, @@ -1896,7 +2086,39 @@ pub fn serve( publish_network_message( &network_tx, - NetworkMessage::Subscribe { + NetworkMessage::AttestationSubscribe { + subscriptions: vec![subscription], + }, + )?; + } + + Ok(()) + }) + }, + ); + + // POST validator/sync_committee_subscriptions + let post_validator_sync_committee_subscriptions = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_subscriptions")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter) + .and(chain_filter.clone()) + .and_then( + |subscriptions: Vec, + network_tx: UnboundedSender>, + chain: Arc>| { + blocking_json_task(move || { + for subscription in subscriptions { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + + publish_network_message( + &network_tx, + NetworkMessage::SyncCommitteeSubscribe { 
subscriptions: vec![subscription], }, )?; @@ -2244,6 +2466,7 @@ pub fn serve( .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_validators_id.boxed()) .or(get_beacon_state_committees.boxed()) + .or(get_beacon_state_sync_committees.boxed()) .or(get_beacon_headers.boxed()) .or(get_beacon_headers_block_id.boxed()) .or(get_beacon_block.boxed()) @@ -2269,6 +2492,7 @@ pub fn serve( .or(get_validator_blocks.boxed()) .or(get_validator_attestation_data.boxed()) .or(get_validator_aggregate_attestation.boxed()) + .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_peers.boxed()) @@ -2290,10 +2514,14 @@ pub fn serve( .or(post_beacon_pool_attester_slashings.boxed()) .or(post_beacon_pool_proposer_slashings.boxed()) .or(post_beacon_pool_voluntary_exits.boxed()) + .or(post_beacon_pool_sync_committees.boxed()) .or(post_validator_duties_attester.boxed()) + .or(post_validator_duties_sync.boxed()) .or(post_validator_aggregate_and_proofs.boxed()) - .or(post_lighthouse_liveness.boxed()) - .or(post_validator_beacon_committee_subscriptions.boxed()), + .or(post_validator_contribution_and_proofs.boxed()) + .or(post_validator_beacon_committee_subscriptions.boxed()) + .or(post_validator_sync_committee_subscriptions.boxed()) + .or(post_lighthouse_liveness.boxed()), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs new file mode 100644 index 00000000000..921c7c4c2de --- /dev/null +++ b/beacon_node/http_api/src/sync_committees.rs @@ -0,0 +1,295 @@ +//! Handlers for sync committee endpoints. + +use crate::publish_pubsub_message; +use beacon_chain::sync_committee_verification::{ + Error as SyncVerificationError, VerifiedSyncCommitteeMessage, +}; +use beacon_chain::{ + BeaconChain, BeaconChainError, BeaconChainTypes, StateSkipConfig, + MAXIMUM_GOSSIP_CLOCK_DISPARITY, +}; +use eth2::types::{self as api_types}; +use eth2_libp2p::PubsubMessage; +use network::NetworkMessage; +use slog::{error, warn, Logger}; +use slot_clock::SlotClock; +use std::cmp::max; +use std::collections::HashMap; +use tokio::sync::mpsc::UnboundedSender; +use types::{ + slot_data::SlotData, BeaconStateError, Epoch, EthSpec, SignedContributionAndProof, + SyncCommitteeMessage, SyncDuty, SyncSubnetId, +}; + +/// The struct that is returned to the requesting HTTP client. +type SyncDuties = api_types::GenericResponse>; + +/// Handles a request from the HTTP API for sync committee duties. +pub fn sync_committee_duties( + request_epoch: Epoch, + request_indices: &[u64], + chain: &BeaconChain, +) -> Result { + let altair_fork_epoch = if let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch { + altair_fork_epoch + } else { + // Empty response for networks with Altair disabled. + return Ok(convert_to_response(vec![])); + }; + + // Try using the head's sync committees to satisfy the request. This should be sufficient for + // the vast majority of requests. Rather than checking if we think the request will succeed in a + // way prone to data races, we attempt the request immediately and check the error code. + match chain.sync_committee_duties_from_head(request_epoch, request_indices) { + Ok(duties) => return Ok(convert_to_response(duties)), + Err(BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { + .. 
+ })) + | Err(BeaconChainError::SyncDutiesError(BeaconStateError::IncorrectStateVariant)) => (), + Err(e) => return Err(warp_utils::reject::beacon_chain_error(e)), + } + + let duties = duties_from_state_load(request_epoch, request_indices, altair_fork_epoch, chain) + .map_err(|e| match e { + BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { + current_epoch, + .. + }) => warp_utils::reject::custom_bad_request(format!( + "invalid epoch: {}, current epoch: {}", + request_epoch, current_epoch + )), + e => warp_utils::reject::beacon_chain_error(e), + })?; + Ok(convert_to_response(duties)) +} + +/// Slow path for duties: load a state and use it to compute the duties. +fn duties_from_state_load<T: BeaconChainTypes>( + request_epoch: Epoch, + request_indices: &[u64], + altair_fork_epoch: Epoch, + chain: &BeaconChain<T>, +) -> Result<Vec<Option<SyncDuty>>, BeaconChainError> { + // Determine what the current epoch would be if we fast-forward our system clock by + // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. + // + // Most of the time, `tolerant_current_epoch` will be equal to `current_epoch`. However, during + // the first `MAXIMUM_GOSSIP_CLOCK_DISPARITY` duration of the epoch `tolerant_current_epoch` + // will equal `current_epoch + 1`. + let current_epoch = chain.epoch()?; + + let tolerant_current_epoch = chain + .slot_clock + .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or(BeaconChainError::UnableToReadSlot)? + .epoch(T::EthSpec::slots_per_epoch()); + + let max_sync_committee_period = tolerant_current_epoch.sync_committee_period(&chain.spec)? + 1; + let sync_committee_period = request_epoch.sync_committee_period(&chain.spec)?; + + if tolerant_current_epoch < altair_fork_epoch { + // Empty response if the epoch is pre-Altair. + Ok(vec![]) + } else if sync_committee_period <= max_sync_committee_period { + // Load the state at the start of the *previous* sync committee period. + // This is sufficient for historical duties, and efficient in the case where the head + // is lagging the current epoch and we need duties for the next period (because we only + // have to transition the head to the start of the current period). + // + // We also need to ensure that the load slot is after the Altair fork. + let load_slot = max( + chain.spec.epochs_per_sync_committee_period * sync_committee_period.saturating_sub(1), + altair_fork_epoch, + ) + .start_slot(T::EthSpec::slots_per_epoch()); + + let state = chain.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots)?; + + state + .get_sync_committee_duties(request_epoch, request_indices, &chain.spec) + .map_err(BeaconChainError::SyncDutiesError) + } else { + Err(BeaconChainError::SyncDutiesError( + BeaconStateError::SyncCommitteeNotKnown { + current_epoch, + epoch: request_epoch, + }, + )) + } +} + +fn convert_to_response(duties: Vec<Option<SyncDuty>>) -> SyncDuties { + api_types::GenericResponse::from(duties.into_iter().flatten().collect::<Vec<SyncDuty>>()) +} + +/// Receive sync committee messages, storing them in the pools & broadcasting them.
+pub fn process_sync_committee_signatures( + sync_committee_signatures: Vec, + network_tx: UnboundedSender>, + chain: &BeaconChain, + log: Logger, +) -> Result<(), warp::reject::Rejection> { + let mut failures = vec![]; + + for (i, sync_committee_signature) in sync_committee_signatures.iter().enumerate() { + let subnet_positions = match get_subnet_positions_for_sync_committee_message( + sync_committee_signature, + chain, + ) { + Ok(positions) => positions, + Err(e) => { + error!( + log, + "Unable to compute subnet positions for sync message"; + "error" => ?e, + "slot" => sync_committee_signature.slot, + ); + failures.push(api_types::Failure::new(i, format!("Verification: {:?}", e))); + continue; + } + }; + + // Verify and publish on all relevant subnets. + // + // The number of assigned subnets on any practical network should be ~1, so the apparent + // inefficiency of verifying multiple times is not a real inefficiency. + let mut verified_for_pool = None; + for subnet_id in subnet_positions.keys().copied() { + match VerifiedSyncCommitteeMessage::verify( + sync_committee_signature.clone(), + subnet_id, + chain, + ) { + Ok(verified) => { + publish_pubsub_message( + &network_tx, + PubsubMessage::SyncCommitteeMessage(Box::new(( + subnet_id, + verified.sync_message().clone(), + ))), + )?; + + verified_for_pool = Some(verified); + } + Err(e) => { + error!( + log, + "Failure verifying sync committee signature for gossip"; + "error" => ?e, + "request_index" => i, + "slot" => sync_committee_signature.slot, + "validator_index" => sync_committee_signature.validator_index, + ); + failures.push(api_types::Failure::new(i, format!("Verification: {:?}", e))); + } + } + } + + if let Some(verified) = verified_for_pool { + if let Err(e) = chain.add_to_naive_sync_aggregation_pool(verified) { + error!( + log, + "Unable to add sync committee signature to pool"; + "error" => ?e, + "slot" => sync_committee_signature.slot, + "validator_index" => sync_committee_signature.validator_index, + ); + } + } + } + + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "error processing sync committee signatures".to_string(), + failures, + )) + } +} + +/// Get the set of all subnet assignments for a `SyncCommitteeMessage`. +pub fn get_subnet_positions_for_sync_committee_message( + sync_message: &SyncCommitteeMessage, + chain: &BeaconChain, +) -> Result>, SyncVerificationError> { + let pubkey = chain + .validator_pubkey_bytes(sync_message.validator_index as usize)? + .ok_or(SyncVerificationError::UnknownValidatorIndex( + sync_message.validator_index as usize, + ))?; + let sync_committee = chain.sync_committee_at_next_slot(sync_message.get_slot())?; + Ok(sync_committee.subcommittee_positions_for_public_key(&pubkey)?) +} + +/// Receive signed contributions and proofs, storing them in the op pool and broadcasting. +pub fn process_signed_contribution_and_proofs( + signed_contribution_and_proofs: Vec>, + network_tx: UnboundedSender>, + chain: &BeaconChain, + log: Logger, +) -> Result<(), warp::reject::Rejection> { + let mut verified_contributions = Vec::with_capacity(signed_contribution_and_proofs.len()); + let mut failures = vec![]; + + // Verify contributions & broadcast to the network. 
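+ // Two-phase design: each contribution is gossip-verified and published
+ // first, then (in the second loop below) added to the block-inclusion
+ // pool. Failures at either stage are collected and returned to the HTTP
+ // client as indexed errors rather than aborting the whole batch.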
+ for (index, contribution) in signed_contribution_and_proofs.into_iter().enumerate() { + let aggregator_index = contribution.message.aggregator_index; + let subcommittee_index = contribution.message.contribution.subcommittee_index; + let contribution_slot = contribution.message.contribution.slot; + + match chain.verify_sync_contribution_for_gossip(contribution) { + Ok(verified_contribution) => { + publish_pubsub_message( + &network_tx, + PubsubMessage::SignedContributionAndProof(Box::new( + verified_contribution.aggregate().clone(), + )), + )?; + + // FIXME(altair): notify validator monitor + verified_contributions.push((index, verified_contribution)); + } + // If we already know the contribution, don't broadcast it or attempt to + // further verify it. Return success. + Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, + Err(e) => { + error!( + log, + "Failure verifying signed contribution and proof"; + "error" => ?e, + "request_index" => index, + "aggregator_index" => aggregator_index, + "subcommittee_index" => subcommittee_index, + "contribution_slot" => contribution_slot, + ); + failures.push(api_types::Failure::new( + index, + format!("Verification: {:?}", e), + )); + } + } + } + + // Add to the block inclusion pool. + for (index, verified_contribution) in verified_contributions { + if let Err(e) = chain.add_contribution_to_block_inclusion_pool(verified_contribution) { + warn!( + log, + "Could not add verified sync contribution to the inclusion pool"; + "error" => ?e, + "request_index" => index, + ); + failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); + } + } + + if !failures.is_empty() { + Err(warp_utils::reject::indexed_bad_request( + "error processing contribution and proofs".to_string(), + failures, + )) + } else { + Ok(()) + } +} diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs new file mode 100644 index 00000000000..db891727ef7 --- /dev/null +++ b/beacon_node/http_api/src/version.rs @@ -0,0 +1,28 @@ +use crate::api_types::{EndpointVersion, ForkVersionedResponse}; +use serde::Serialize; +use types::ForkName; + +pub const V1: EndpointVersion = EndpointVersion(1); +pub const V2: EndpointVersion = EndpointVersion(2); + +pub fn fork_versioned_response( + endpoint_version: EndpointVersion, + fork_name: Option, + data: T, +) -> Result, warp::reject::Rejection> { + let fork_name = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + fork_name + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(ForkVersionedResponse { + version: fork_name, + data, + }) +} + +pub fn unsupported_version_rejection(version: EndpointVersion) -> warp::reject::Rejection { + warp_utils::reject::custom_bad_request(format!("Unsupported endpoint version: {}", version)) +} diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs new file mode 100644 index 00000000000..24ffd38c80f --- /dev/null +++ b/beacon_node/http_api/tests/common.rs @@ -0,0 +1,142 @@ +use beacon_chain::{ + test_utils::{BeaconChainHarness, EphemeralHarnessType}, + BeaconChain, BeaconChainTypes, +}; +use discv5::enr::{CombinedKey, EnrBuilder}; +use eth2::{BeaconNodeHttpClient, Timeouts}; +use eth2_libp2p::{ + rpc::methods::{MetaData, MetaDataV2}, + types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, + Enr, NetworkGlobals, PeerId, +}; +use http_api::{Config, Context}; +use network::NetworkMessage; +use sensitive_url::SensitiveUrl; +use slog::Logger; +use 
std::future::Future; +use std::net::{Ipv4Addr, SocketAddr}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{mpsc, oneshot}; +use types::{test_utils::generate_deterministic_keypairs, ChainSpec, EthSpec}; + +pub const TCP_PORT: u16 = 42; +pub const UDP_PORT: u16 = 42; +pub const SEQ_NUMBER: u64 = 0; +pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000"; + +/// HTTP API tester that allows interaction with the underlying beacon chain harness. +pub struct InteractiveTester { + pub harness: BeaconChainHarness>, + pub client: BeaconNodeHttpClient, + pub network_rx: mpsc::UnboundedReceiver>, + _server_shutdown: oneshot::Sender<()>, +} + +/// The result of calling `create_api_server`. +/// +/// Glue-type between `tests::ApiTester` and `InteractiveTester`. +pub struct ApiServer> { + pub server: SFut, + pub listening_socket: SocketAddr, + pub shutdown_tx: oneshot::Sender<()>, + pub network_rx: tokio::sync::mpsc::UnboundedReceiver>, + pub local_enr: Enr, + pub external_peer_id: PeerId, +} + +impl InteractiveTester { + pub fn new(spec: Option, validator_count: usize) -> Self { + let harness = BeaconChainHarness::new( + E::default(), + spec, + generate_deterministic_keypairs(validator_count), + ); + + let ApiServer { + server, + listening_socket, + shutdown_tx: _server_shutdown, + network_rx, + .. + } = create_api_server(harness.chain.clone(), harness.logger().clone()); + + tokio::spawn(server); + + let client = BeaconNodeHttpClient::new( + SensitiveUrl::parse(&format!( + "http://{}:{}", + listening_socket.ip(), + listening_socket.port() + )) + .unwrap(), + Timeouts::set_all(Duration::from_secs(1)), + ); + + Self { + harness, + client, + network_rx, + _server_shutdown, + } + } +} + +pub fn create_api_server( + chain: Arc>, + log: Logger, +) -> ApiServer> { + let (network_tx, network_rx) = mpsc::unbounded_channel(); + + // Default metadata + let meta_data = MetaData::V2(MetaDataV2 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }); + let enr_key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let network_globals = + NetworkGlobals::new(enr.clone(), TCP_PORT, UDP_PORT, meta_data, vec![], &log); + + let peer_id = PeerId::random(); + network_globals + .peers + .write() + .connect_ingoing(&peer_id, EXTERNAL_ADDR.parse().unwrap(), None); + + *network_globals.sync_state.write() = SyncState::Synced; + + let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()); + + let context = Arc::new(Context { + config: Config { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + serve_legacy_spec: true, + }, + chain: Some(chain.clone()), + network_tx: Some(network_tx), + network_globals: Some(Arc::new(network_globals)), + eth1_service: Some(eth1_service), + log, + }); + let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. 
+ let _ = shutdown_rx.await; + }; + let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + + ApiServer { + server, + listening_socket, + shutdown_tx, + network_rx, + local_enr: enr, + external_peer_id: peer_id, + } +} diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs new file mode 100644 index 00000000000..9286b60af87 --- /dev/null +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -0,0 +1,305 @@ +//! Tests for API behaviour across fork boundaries. +use crate::common::*; +use beacon_chain::{test_utils::RelativeSyncCommittee, StateSkipConfig}; +use eth2::types::{StateId, SyncSubcommittee}; +use types::{ChainSpec, Epoch, EthSpec, MinimalEthSpec, Slot}; + +type E = MinimalEthSpec; + +fn altair_spec(altair_fork_epoch: Epoch) -> ChainSpec { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn sync_committee_duties_across_fork() { + let validator_count = E::sync_committee_size(); + let fork_epoch = Epoch::new(8); + let spec = altair_spec(fork_epoch); + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count); + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + let all_validators_u64 = all_validators.iter().map(|x| *x as u64).collect::>(); + + assert_eq!(harness.get_current_slot(), 0); + + // Prior to the fork the endpoint should return an empty vec. + let early_duties = client + .post_validator_duties_sync(fork_epoch - 1, &all_validators_u64) + .await + .unwrap() + .data; + assert!(early_duties.is_empty()); + + // If there's a skip slot at the fork slot, the endpoint should return duties, even + // though the head state hasn't transitioned yet. + let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + let (_, state) = harness + .add_attested_block_at_slot( + fork_slot - 1, + genesis_state, + genesis_state_root, + &all_validators, + ) + .unwrap(); + + harness.advance_slot(); + assert_eq!(harness.get_current_slot(), fork_slot); + + let sync_duties = client + .post_validator_duties_sync(fork_epoch, &all_validators_u64) + .await + .unwrap() + .data; + assert_eq!(sync_duties.len(), E::sync_committee_size()); + + // After applying a block at the fork slot the duties should remain unchanged. + let state_root = state.canonical_root(); + harness + .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators) + .unwrap(); + + assert_eq!( + client + .post_validator_duties_sync(fork_epoch, &all_validators_u64) + .await + .unwrap() + .data, + sync_duties + ); + + // Sync duties should also be available for the next period. + let current_period = fork_epoch.sync_committee_period(&spec).unwrap(); + let next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 1); + + let next_period_duties = client + .post_validator_duties_sync(next_period_epoch, &all_validators_u64) + .await + .unwrap() + .data; + assert_eq!(next_period_duties.len(), E::sync_committee_size()); + + // Sync duties should *not* be available for the period after the next period. + // We expect a 400 (bad request) response. 
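Schematically, the availability rule these duty tests exercise reduces to the following (a simplified model of the handler, not its actual code):

// Sketch: what status/body the sync duties endpoint is expected to return.
fn duty_availability(request_period: u64, current_period: u64, pre_altair: bool) -> &'static str {
    if pre_altair {
        "200 OK, empty duties" // pre-fork epochs yield an empty list
    } else if request_period <= current_period + 1 {
        "200 OK, full duties" // current period and the next one
    } else {
        "400 Bad Request" // anything beyond the next period
    }
}

fn main() {
    assert_eq!(duty_availability(2, 1, false), "200 OK, full duties");
    assert_eq!(duty_availability(3, 1, false), "400 Bad Request");
}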
+ let next_next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 2); + assert_eq!( + client + .post_validator_duties_sync(next_next_period_epoch, &all_validators_u64) + .await + .unwrap_err() + .status() + .unwrap(), + 400 + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn attestations_across_fork_with_skip_slots() { + let validator_count = E::sync_committee_size(); + let fork_epoch = Epoch::new(8); + let spec = altair_spec(fork_epoch); + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count); + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + + let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); + let fork_state = harness + .chain + .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + + harness.set_current_slot(fork_slot); + + let attestations = harness.make_attestations( + &all_validators, + &fork_state, + fork_state.canonical_root(), + (*fork_state.get_block_root(fork_slot - 1).unwrap()).into(), + fork_slot, + ); + + let unaggregated_attestations = attestations + .iter() + .flat_map(|(atts, _)| atts.iter().map(|(att, _)| att.clone())) + .collect::>(); + + assert!(!unaggregated_attestations.is_empty()); + client + .post_beacon_pool_attestations(&unaggregated_attestations) + .await + .unwrap(); + + let signed_aggregates = attestations + .into_iter() + .filter_map(|(_, op_aggregate)| op_aggregate) + .collect::>(); + assert!(!signed_aggregates.is_empty()); + + client + .post_validator_aggregate_and_proof(&signed_aggregates) + .await + .unwrap(); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn sync_contributions_across_fork_with_skip_slots() { + let validator_count = E::sync_committee_size(); + let fork_epoch = Epoch::new(8); + let spec = altair_spec(fork_epoch); + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count); + let harness = &tester.harness; + let client = &tester.client; + + let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); + let fork_state = harness + .chain + .state_at_slot(fork_slot, StateSkipConfig::WithStateRoots) + .unwrap(); + + harness.set_current_slot(fork_slot); + + let sync_messages = harness.make_sync_contributions( + &fork_state, + *fork_state.get_block_root(fork_slot - 1).unwrap(), + fork_slot, + RelativeSyncCommittee::Current, + ); + + let sync_committee_messages = sync_messages + .iter() + .flat_map(|(messages, _)| messages.iter().map(|(message, _subnet)| message.clone())) + .collect::>(); + assert!(!sync_committee_messages.is_empty()); + + client + .post_beacon_pool_sync_committee_signatures(&sync_committee_messages) + .await + .unwrap(); + + let signed_contributions = sync_messages + .into_iter() + .filter_map(|(_, op_aggregate)| op_aggregate) + .collect::>(); + assert!(!signed_contributions.is_empty()); + + client + .post_validator_contribution_and_proofs(&signed_contributions) + .await + .unwrap(); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn sync_committee_indices_across_fork() { + let validator_count = E::sync_committee_size(); + let fork_epoch = Epoch::new(8); + let spec = altair_spec(fork_epoch); + let tester = InteractiveTester::::new(Some(spec.clone()), validator_count); + let harness = &tester.harness; + let client = &tester.client; + + let all_validators = harness.get_all_validators(); + + // Flatten subcommittees into a single vec. 
+ let flatten = |subcommittees: &[SyncSubcommittee]| -> Vec { + subcommittees + .iter() + .flat_map(|sub| sub.indices.iter().copied()) + .collect() + }; + + // Prior to the fork the `sync_committees` endpoint should return a 400 error. + assert_eq!( + client + .get_beacon_states_sync_committees(StateId::Slot(Slot::new(0)), None) + .await + .unwrap_err() + .status() + .unwrap(), + 400 + ); + assert_eq!( + client + .get_beacon_states_sync_committees(StateId::Head, Some(Epoch::new(0))) + .await + .unwrap_err() + .status() + .unwrap(), + 400 + ); + + // If there's a skip slot at the fork slot, the endpoint will return a 400 until a block is + // applied. + let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + let (_, state) = harness + .add_attested_block_at_slot( + fork_slot - 1, + genesis_state, + genesis_state_root, + &all_validators, + ) + .unwrap(); + + harness.advance_slot(); + assert_eq!(harness.get_current_slot(), fork_slot); + + // Using the head state must fail. + assert_eq!( + client + .get_beacon_states_sync_committees(StateId::Head, Some(fork_epoch)) + .await + .unwrap_err() + .status() + .unwrap(), + 400 + ); + + // In theory we could do a state advance and make this work, but to keep things simple I've + // avoided doing that for now. + assert_eq!( + client + .get_beacon_states_sync_committees(StateId::Slot(fork_slot), None) + .await + .unwrap_err() + .status() + .unwrap(), + 400 + ); + + // Once the head is updated it should be useable for requests, including in the next sync + // committee period. + let state_root = state.canonical_root(); + harness + .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators) + .unwrap(); + + let current_period = fork_epoch.sync_committee_period(&spec).unwrap(); + let next_period_epoch = spec.epochs_per_sync_committee_period * (current_period + 1); + assert!(next_period_epoch > fork_epoch); + + for epoch in [ + None, + Some(fork_epoch), + Some(fork_epoch + 1), + Some(next_period_epoch), + Some(next_period_epoch + 1), + ] { + let committee = client + .get_beacon_states_sync_committees(StateId::Head, epoch) + .await + .unwrap() + .data; + assert_eq!(committee.validators.len(), E::sync_committee_size()); + + assert_eq!( + committee.validators, + flatten(&committee.validator_aggregates) + ); + } +} diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs new file mode 100644 index 00000000000..d10725a0264 --- /dev/null +++ b/beacon_node/http_api/tests/main.rs @@ -0,0 +1,6 @@ +#![cfg(not(debug_assertions))] // Tests are too slow in debug. +#![recursion_limit = "256"] + +pub mod common; +pub mod fork_tests; +pub mod tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 470afbf0923..21e52b92bbf 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,6 +1,4 @@ -#![cfg(not(debug_assertions))] // Tests are too slow in debug. 
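The `flatten` helper in the test above encodes the invariant that the flat `validators` list equals the concatenation of the per-subcommittee `validator_aggregates`; a standalone sketch of that check:

// Flatten per-subcommittee index lists back into one list.
fn flatten(subcommittees: &[Vec<u64>]) -> Vec<u64> {
    subcommittees.iter().flat_map(|sub| sub.iter().copied()).collect()
}

fn main() {
    // A toy committee of 8 validators split into 4 subcommittees of 2.
    let validators: Vec<u64> = vec![3, 7, 1, 9, 4, 2, 8, 5];
    let aggregates: Vec<Vec<u64>> = validators.chunks(2).map(|c| c.to_vec()).collect();
    assert_eq!(flatten(&aggregates), validators);
}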
-#![recursion_limit = "256"] - +use crate::common::{create_api_server, ApiServer}; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, @@ -9,21 +7,14 @@ use environment::null_logger; use eth2::Error; use eth2::StatusCode; use eth2::{types::*, BeaconNodeHttpClient, Timeouts}; -use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; -use eth2_libp2p::{ - rpc::methods::MetaData, - types::{EnrBitfield, SyncState}, - Enr, EnrExt, NetworkGlobals, PeerId, -}; +use eth2_libp2p::{Enr, EnrExt, PeerId}; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; -use http_api::{Config, Context}; use network::NetworkMessage; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; -use std::net::Ipv4Addr; use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; @@ -41,9 +32,6 @@ const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize; const CHAIN_LENGTH: u64 = SLOTS_PER_EPOCH * 5 - 1; // Make `next_block` an epoch transition const JUSTIFIED_EPOCH: u64 = 4; const FINALIZED_EPOCH: u64 = 3; -const TCP_PORT: u16 = 42; -const UDP_PORT: u16 = 42; -const SEQ_NUMBER: u64 = 0; const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000"; /// Skipping the slots around the epoch boundary allows us to check that we're obtaining states @@ -74,9 +62,13 @@ struct ApiTester { impl ApiTester { pub fn new() -> Self { - let mut harness = BeaconChainHarness::new( + // This allows for testing voluntary exits without building out a massive chain. + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + + let harness = BeaconChainHarness::new( MainnetEthSpec, - None, + Some(spec), generate_deterministic_keypairs(VALIDATOR_COUNT), ); @@ -134,13 +126,7 @@ impl ApiTester { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - // Changing this *after* the chain has been initialized is a bit cheeky, but it shouldn't - // cause issue. - // - // This allows for testing voluntary exits without building out a massive chain. 
- harness.chain.spec.shard_committee_period = 2; - - let chain = Arc::new(harness.chain); + let chain = harness.chain.clone(); assert_eq!( chain.head_info().unwrap().finalized_checkpoint.epoch, @@ -157,55 +143,18 @@ impl ApiTester { "precondition: justification" ); - let (network_tx, network_rx) = mpsc::unbounded_channel(); - let log = null_logger().unwrap(); - // Default metadata - let meta_data = MetaData { - seq_number: SEQ_NUMBER, - attnets: EnrBitfield::::default(), - }; - let enr_key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); - let enr_clone = enr.clone(); - let network_globals = NetworkGlobals::new(enr, TCP_PORT, UDP_PORT, meta_data, vec![], &log); - - let peer_id = PeerId::random(); - network_globals.peers.write().connect_ingoing( - &peer_id, - EXTERNAL_ADDR.parse().unwrap(), - None, - ); - - *network_globals.sync_state.write() = SyncState::Synced; - - let eth1_service = - eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()); - - let context = Arc::new(Context { - config: Config { - enabled: true, - listen_addr: Ipv4Addr::new(127, 0, 0, 1), - listen_port: 0, - allow_origin: None, - serve_legacy_spec: true, - }, - chain: Some(chain.clone()), - network_tx: Some(network_tx), - network_globals: Some(Arc::new(network_globals)), - eth1_service: Some(eth1_service), - log, - }); - let ctx = context.clone(); - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - let server_shutdown = async { - // It's not really interesting why this triggered, just that it happened. - let _ = shutdown_rx.await; - }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let ApiServer { + server, + listening_socket, + shutdown_tx, + network_rx, + local_enr, + external_peer_id, + } = create_api_server(chain.clone(), log); - tokio::spawn(async { server.await }); + tokio::spawn(server); let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( @@ -229,8 +178,8 @@ impl ApiTester { _server_shutdown: shutdown_tx, validator_keypairs: harness.validator_keypairs, network_rx, - local_enr: enr_clone, - external_peer_id: peer_id, + local_enr, + external_peer_id, } } @@ -270,57 +219,20 @@ impl ApiTester { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - let chain = Arc::new(harness.chain); - - let (network_tx, network_rx) = mpsc::unbounded_channel(); + let chain = harness.chain.clone(); let log = null_logger().unwrap(); - // Default metadata - let meta_data = MetaData { - seq_number: SEQ_NUMBER, - attnets: EnrBitfield::::default(), - }; - let enr_key = CombinedKey::generate_secp256k1(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); - let enr_clone = enr.clone(); - let network_globals = NetworkGlobals::new(enr, TCP_PORT, UDP_PORT, meta_data, vec![], &log); - - let peer_id = PeerId::random(); - network_globals.peers.write().connect_ingoing( - &peer_id, - EXTERNAL_ADDR.parse().unwrap(), - None, - ); - - *network_globals.sync_state.write() = SyncState::Synced; - - let eth1_service = - eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()); - - let context = Arc::new(Context { - config: Config { - enabled: true, - listen_addr: Ipv4Addr::new(127, 0, 0, 1), - listen_port: 0, - allow_origin: None, - serve_legacy_spec: true, - }, - chain: Some(chain.clone()), - network_tx: Some(network_tx), - network_globals: Some(Arc::new(network_globals)), - eth1_service: Some(eth1_service), - 
log, - }); - let ctx = context.clone(); - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - let server_shutdown = async { - // It's not really interesting why this triggered, just that it happened. - let _ = shutdown_rx.await; - }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let ApiServer { + server, + listening_socket, + shutdown_tx, + network_rx, + local_enr, + external_peer_id, + } = create_api_server(chain.clone(), log); - tokio::spawn(async { server.await }); + tokio::spawn(server); let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( @@ -344,8 +256,8 @@ impl ApiTester { _server_shutdown: shutdown_tx, validator_keypairs: harness.validator_keypairs, network_rx, - local_enr: enr_clone, - external_peer_id: peer_id, + local_enr, + external_peer_id, } } @@ -1009,13 +921,18 @@ impl ApiTester { } } - let json_result = self - .client - .get_beacon_blocks(block_id) - .await - .unwrap() - .map(|res| res.data); - assert_eq!(json_result, expected, "{:?}", block_id); + let json_result = self.client.get_beacon_blocks(block_id).await.unwrap(); + + if let (Some(json), Some(expected)) = (&json_result, &expected) { + assert_eq!(json.data, *expected, "{:?}", block_id); + assert_eq!( + json.version, + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert_eq!(json_result, None); + assert_eq!(expected, None); + } let ssz_result = self .client @@ -1023,6 +940,16 @@ impl ApiTester { .await .unwrap(); assert_eq!(ssz_result, expected, "{:?}", block_id); + + // Check that the legacy v1 API still works but doesn't return a version field. + let v1_result = self.client.get_beacon_blocks_v1(block_id).await.unwrap(); + if let (Some(v1_result), Some(expected)) = (&v1_result, &expected) { + assert_eq!(v1_result.version, None); + assert_eq!(v1_result.data, *expected); + } else { + assert_eq!(v1_result, None); + assert_eq!(expected, None); + } } self @@ -1338,6 +1265,7 @@ impl ApiTester { metadata: eth2::types::MetaData { seq_number: 0, attnets: "0x0000000000000000".to_string(), + syncnets: "0x00".to_string(), }, }; @@ -1440,23 +1368,44 @@ impl ApiTester { pub async fn test_get_debug_beacon_states(self) -> Self { for state_id in self.interesting_state_ids() { + let result_json = self.client.get_debug_beacon_states(state_id).await.unwrap(); + + let mut expected = self.get_state(state_id); + expected.as_mut().map(|state| state.drop_all_caches()); + + if let (Some(json), Some(expected)) = (&result_json, &expected) { + assert_eq!(json.data, *expected, "{:?}", state_id); + assert_eq!( + json.version, + Some(expected.fork_name(&self.chain.spec).unwrap()) + ); + } else { + assert_eq!(result_json, None); + assert_eq!(expected, None); + } + + // Check SSZ API. let result_ssz = self .client .get_debug_beacon_states_ssz(state_id, &self.chain.spec) .await .unwrap(); - let result_json = self + assert_eq!(result_ssz, expected, "{:?}", state_id); + + // Check legacy v1 API. 
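The v1/v2 assertions in these tests boil down to one contract, sketched below with a stand-in `ForkName` enum (not the real `types::ForkName`):

#[derive(Debug, PartialEq, Clone, Copy)]
enum ForkName {
    Base,
    Altair,
}

// v1 responses omit the `version` field; v2 responses carry the fork of the data.
fn response_version(endpoint_version: u64, fork: ForkName) -> Result<Option<ForkName>, String> {
    match endpoint_version {
        1 => Ok(None),
        2 => Ok(Some(fork)),
        v => Err(format!("Unsupported endpoint version: {}", v)),
    }
}

fn main() {
    assert_eq!(response_version(1, ForkName::Altair), Ok(None));
    assert_eq!(response_version(2, ForkName::Altair), Ok(Some(ForkName::Altair)));
    assert!(response_version(3, ForkName::Base).is_err());
}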
+ let result_v1 = self .client - .get_debug_beacon_states(state_id) + .get_debug_beacon_states_v1(state_id) .await - .unwrap() - .map(|res| res.data); - - let mut expected = self.get_state(state_id); - expected.as_mut().map(|state| state.drop_all_caches()); + .unwrap(); - assert_eq!(result_ssz, expected, "{:?}", state_id); - assert_eq!(result_json, expected, "{:?}", state_id); + if let (Some(json), Some(expected)) = (&result_v1, &expected) { + assert_eq!(json.version, None); + assert_eq!(json.data, *expected, "{:?}", state_id); + } else { + assert_eq!(result_v1, None); + assert_eq!(expected, None); + } } self diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 63990a54c88..b7d2191e23d 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,6 +15,7 @@ slog-term = "2.6.0" slog-async = "2.5.0" logging = { path = "../../common/logging" } environment = { path = "../../lighthouse/environment" } +discv5 = { version = "0.1.0-beta.3" } [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/network/src/attestation_service/tests/mod.rs b/beacon_node/network/src/attestation_service/tests/mod.rs deleted file mode 100644 index 55811a1917e..00000000000 --- a/beacon_node/network/src/attestation_service/tests/mod.rs +++ /dev/null @@ -1,429 +0,0 @@ -use super::*; -use beacon_chain::{ - builder::{BeaconChainBuilder, Witness}, - eth1_chain::CachingEth1Backend, -}; -use futures::Stream; -use genesis::{generate_deterministic_keypairs, interop_genesis_state}; -use lazy_static::lazy_static; -use matches::assert_matches; -use slog::Logger; -use sloggers::{null::NullLoggerBuilder, Build}; -use slot_clock::{SlotClock, SystemTimeSlotClock}; -use std::time::{Duration, SystemTime}; -use store::config::StoreConfig; -use store::{HotColdDB, MemoryStore}; -use types::{CommitteeIndex, EthSpec, MinimalEthSpec}; - -const SLOT_DURATION_MILLIS: u64 = 400; - -type TestBeaconChainType = Witness< - SystemTimeSlotClock, - CachingEth1Backend, - MinimalEthSpec, - MemoryStore, - MemoryStore, ->; - -pub struct TestBeaconChain { - chain: Arc>, -} - -impl TestBeaconChain { - pub fn new_with_system_clock() -> Self { - let spec = MinimalEthSpec::default_spec(); - - let keypairs = generate_deterministic_keypairs(1); - - let log = get_logger(); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); - - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - - let chain = Arc::new( - BeaconChainBuilder::new(MinimalEthSpec) - .logger(log.clone()) - .custom_spec(spec.clone()) - .store(Arc::new(store)) - .genesis_state( - interop_genesis_state::(&keypairs, 0, &spec) - .expect("should generate interop state"), - ) - .expect("should build state using recent genesis") - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(SystemTimeSlotClock::new( - Slot::new(0), - Duration::from_secs(recent_genesis_time()), - Duration::from_millis(SLOT_DURATION_MILLIS), - )) - .shutdown_sender(shutdown_tx) - .monitor_validators(true, vec![], log) - .build() - .expect("should build"), - ); - Self { chain } - } -} - -pub fn recent_genesis_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() -} - -fn get_logger() -> Logger { - NullLoggerBuilder.build().expect("logger should build") -} - -lazy_static! 
{ - static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock(); -} - -fn get_attestation_service() -> AttestationService { - let log = get_logger(); - let config = NetworkConfig::default(); - - let beacon_chain = CHAIN.chain.clone(); - - AttestationService::new(beacon_chain, &config, &log) -} - -fn get_subscription( - validator_index: u64, - attestation_committee_index: CommitteeIndex, - slot: Slot, - committee_count_at_slot: u64, -) -> ValidatorSubscription { - let is_aggregator = true; - ValidatorSubscription { - validator_index, - attestation_committee_index, - slot, - committee_count_at_slot, - is_aggregator, - } -} - -fn get_subscriptions( - validator_count: u64, - slot: Slot, - committee_count_at_slot: u64, -) -> Vec { - (0..validator_count) - .map(|validator_index| { - get_subscription( - validator_index, - validator_index, - slot, - committee_count_at_slot, - ) - }) - .collect() -} - -// gets a number of events from the subscription service, or returns none if it times out after a number -// of slots -async fn get_events + Unpin>( - stream: &mut S, - num_events: Option, - num_slots_before_timeout: u32, -) -> Vec { - let mut events = Vec::new(); - - let collect_stream_fut = async { - loop { - if let Some(result) = stream.next().await { - events.push(result); - if let Some(num) = num_events { - if events.len() == num { - return; - } - } - } - } - }; - - tokio::select! { - _ = collect_stream_fut => {events} - _ = tokio::time::sleep( - Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout, - ) => { events } - } -} - -#[tokio::test] -async fn subscribe_current_slot_wait_for_unsubscribe() { - // subscription config - let validator_index = 1; - let committee_index = 1; - // Keep a low subscription slot so that there are no additional subnet discovery events. - let subscription_slot = 0; - let committee_count = 1; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = vec![get_subscription( - validator_index, - committee_index, - current_slot + Slot::new(subscription_slot), - committee_count, - )]; - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - // not enough time for peer discovery, just subscribe, unsubscribe - let subnet_id = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot), - committee_index, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); - let expected = vec![ - AttServiceMessage::Subscribe(subnet_id), - AttServiceMessage::Unsubscribe(subnet_id), - ]; - - // Wait for 1 slot duration to get the unsubscription event - let events = get_events(&mut attestation_service, None, 1).await; - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_), - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] - ); - - // If the long lived and short lived subnets are the same, there should be no more events - // as we don't resubscribe already subscribed subnets. - if !attestation_service.random_subnets.contains(&subnet_id) { - assert_eq!(expected[..], events[3..]); - } - // Should be subscribed to only 1 long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), 1); -} - -/// Test to verify that we are not unsubscribing to a subnet before a required subscription. 
-#[tokio::test] -async fn test_same_subnet_unsubscription() { - // subscription config - let validator_index = 1; - let committee_count = 1; - - // Makes 2 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). - let subscription_slot1 = 0; - let subscription_slot2 = 1; - let com1 = 1; - let com2 = 0; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let sub1 = get_subscription( - validator_index, - com1, - current_slot + Slot::new(subscription_slot1), - committee_count, - ); - - let sub2 = get_subscription( - validator_index, - com2, - current_slot + Slot::new(subscription_slot2), - committee_count, - ); - - let subnet_id1 = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot1), - com1, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); - - let subnet_id2 = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot2), - com2, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); - - // Assert that subscriptions are different but their subnet is the same - assert_ne!(sub1, sub2); - assert_eq!(subnet_id1, subnet_id2); - - // submit the subscriptions - attestation_service - .validator_subscriptions(vec![sub1, sub2]) - .unwrap(); - - // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) - // Get all events for 1 slot duration (unsubscription event should happen after 2 slot durations). - let events = get_events(&mut attestation_service, None, 1).await; - assert_matches!( - events[..3], - [ - AttServiceMessage::DiscoverPeers(_), - AttServiceMessage::Subscribe(_any1), - AttServiceMessage::EnrAdd(_any3) - ] - ); - - let expected = AttServiceMessage::Subscribe(subnet_id1); - - // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. - if !attestation_service.random_subnets.contains(&subnet_id1) { - assert_eq!(expected, events[3]); - assert_eq!(attestation_service.subscription_count(), 2); - } else { - assert_eq!(attestation_service.subscription_count(), 1); - } - - // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; - - // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service.random_subnets.contains(&subnet_id1) { - assert_eq!( - [AttServiceMessage::Unsubscribe(subnet_id1)], - unsubscribe_event[..] - ); - } - - // Should be subscribed to only 1 long lived subnet after unsubscription. 
- assert_eq!(attestation_service.subscription_count(), 1); -} - -#[tokio::test] -async fn subscribe_all_random_subnets() { - let attestation_subnet_count = MinimalEthSpec::default_spec().attestation_subnet_count; - let subscription_slot = 10; - let subscription_count = attestation_subnet_count; - let committee_count = 1; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = get_subscriptions( - subscription_count, - current_slot + subscription_slot, - committee_count, - ); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - let events = get_events(&mut attestation_service, None, 3).await; - let mut discover_peer_count = 0; - let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; - - for event in &events { - match event { - AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, - AttServiceMessage::Subscribe(_any_subnet) => {} - AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, - _ => unexpected_msg_count += 1, - } - } - - // The bulk discovery request length should be equal to validator_count - let bulk_discovery_event = events.last().unwrap(); - if let AttServiceMessage::DiscoverPeers(d) = bulk_discovery_event { - assert_eq!(d.len(), attestation_subnet_count as usize); - } else { - panic!("Unexpected event {:?}", bulk_discovery_event); - } - - // 64 `DiscoverPeer` requests of length 1 corresponding to random subnets - // and 1 `DiscoverPeer` request corresponding to bulk subnet discovery. - assert_eq!(discover_peer_count, subscription_count + 1); - assert_eq!(attestation_service.subscription_count(), 64); - assert_eq!(enr_add_count, 64); - assert_eq!(unexpected_msg_count, 0); - // test completed successfully -} - -#[tokio::test] -async fn subscribe_all_random_subnets_plus_one() { - let attestation_subnet_count = MinimalEthSpec::default_spec().attestation_subnet_count; - let subscription_slot = 10; - // the 65th subscription should result in no more messages than the previous scenario - let subscription_count = attestation_subnet_count + 1; - let committee_count = 1; - - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); - let current_slot = attestation_service - .beacon_chain - .slot_clock - .now() - .expect("Could not get current slot"); - - let subscriptions = get_subscriptions( - subscription_count, - current_slot + subscription_slot, - committee_count, - ); - - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions) - .unwrap(); - - let events = get_events(&mut attestation_service, None, 3).await; - let mut discover_peer_count = 0; - let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; - - for event in &events { - match event { - AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, - AttServiceMessage::Subscribe(_any_subnet) => {} - AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, - _ => unexpected_msg_count += 1, - } - } - - // The bulk discovery request length shouldn't exceed max attestation_subnet_count - let bulk_discovery_event = events.last().unwrap(); - if let AttServiceMessage::DiscoverPeers(d) = bulk_discovery_event { - assert_eq!(d.len(), attestation_subnet_count as usize); - } else { - panic!("Unexpected event {:?}", bulk_discovery_event); - } - // 64 
`DiscoverPeer` requests of length 1 corresponding to random subnets - // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. - // For the 65th subscription, the call to `subscribe_to_random_subnets` is not made because we are at capacity. - assert_eq!(discover_peer_count, 64 + 1); - assert_eq!(attestation_service.subscription_count(), 64); - assert_eq!(enr_add_count, 64); - assert_eq!(unexpected_msg_count, 0); -} diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index fb8fbc9dcd0..91f20e68c81 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -57,7 +57,8 @@ use task_executor::TaskExecutor; use tokio::sync::{mpsc, oneshot}; use types::{ Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReadyWork, @@ -121,6 +122,14 @@ const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; /// before we start dropping them. const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; +/// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping +/// them. +const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; + +/// The maximum number of queued `SignedContributionAndProof` objects that will be stored before we +/// start dropping them. +const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; + /// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that /// will be stored before we start dropping them. const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; @@ -160,6 +169,8 @@ pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing"; +pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature"; +pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -327,6 +338,44 @@ impl WorkEvent { } } + /// Create a new `Work` event for some sync committee signature. + pub fn gossip_sync_signature( + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature: Box::new(sync_signature), + subnet_id, + seen_timestamp, + }, + } + } + + /// Create a new `Work` event for some sync committee contribution. + pub fn gossip_sync_contribution( + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution: Box::new(sync_contribution), + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some exit. 
pub fn gossip_voluntary_exit( message_id: MessageId, @@ -553,6 +602,19 @@ pub enum Work<T: BeaconChainTypes> { peer_id: PeerId, attester_slashing: Box<AttesterSlashing<T::EthSpec>>, }, + GossipSyncSignature { + message_id: MessageId, + peer_id: PeerId, + sync_signature: Box<SyncCommitteeMessage>, + subnet_id: SyncSubnetId, + seen_timestamp: Duration, + }, + GossipSyncContribution { + message_id: MessageId, + peer_id: PeerId, + sync_contribution: Box<SignedContributionAndProof<T::EthSpec>>, + seen_timestamp: Duration, + }, RpcBlock { block: Box<SignedBeaconBlock<T::EthSpec>>, result_tx: BlockResultSender<T::EthSpec>, @@ -588,6 +650,8 @@ impl<T: BeaconChainTypes> Work<T> { Work::GossipVoluntaryExit { .. } => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing { .. } => GOSSIP_PROPOSER_SLASHING, Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING, + Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE, + Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION, Work::RpcBlock { .. } => RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, @@ -730,6 +794,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { let mut unknown_block_attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + let mut sync_message_queue = LifoQueue::new(MAX_SYNC_MESSAGE_QUEUE_LEN); + let mut sync_contribution_queue = LifoQueue::new(MAX_SYNC_CONTRIBUTION_QUEUE_LEN); + // Using a FIFO queue for voluntary exits since it prevents exit censoring. I don't have // a strong feeling about queue type for exits. let mut gossip_voluntary_exit_queue = FifoQueue::new(MAX_GOSSIP_EXIT_QUEUE_LEN); @@ -859,6 +926,12 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { self.spawn_worker(item, toolbox); } else if let Some(item) = attestation_queue.pop() { self.spawn_worker(item, toolbox); + // Check sync committee messages after attestations as their rewards are smaller + // and they don't influence fork choice. + } else if let Some(item) = sync_contribution_queue.pop() { + self.spawn_worker(item, toolbox); + } else if let Some(item) = sync_message_queue.pop() { + self.spawn_worker(item, toolbox); // Aggregates and unaggregates queued for re-processing are older and we // care about fresher ones, so check those first. } else if let Some(item) = unknown_block_aggregate_queue.pop() { @@ -952,6 +1025,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { Work::GossipAttesterSlashing { .. } => { gossip_attester_slashing_queue.push(work, work_id, &self.log) } + Work::GossipSyncSignature { .. } => sync_message_queue.push(work), + Work::GossipSyncContribution { .. } => { + sync_contribution_queue.push(work) + } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), Work::ChainSegment { .. } => { chain_segment_queue.push(work, work_id, &self.log) } @@ -985,6 +1062,14 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL, aggregate_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL, + sync_message_queue.len() as i64, + ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL, + sync_contribution_queue.len() as i64, + ); metrics::set_gauge( + &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, gossip_block_queue.len() as i64, @@ -1188,6 +1273,36 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> { peer_id, *attester_slashing, ), + /* + * Sync committee message verification. + */ + Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + } => worker.process_gossip_sync_committee_signature( + message_id, + peer_id, + *sync_signature, + subnet_id, + seen_timestamp, + ), + /* + * Sync contribution verification.
+ */ + Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution, + seen_timestamp, + } => worker.process_sync_committee_contribution( + message_id, + peer_id, + *sync_contribution, + seen_timestamp, + ), /* * Verification for beacon blocks received during syncing via RPC. */ diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 617b9a87153..0f491527b27 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -9,8 +9,12 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use environment::{null_logger, Environment, EnvironmentBuilder}; -use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; -use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, MessageId, NetworkGlobals, PeerId}; +use eth2_libp2p::{ + discv5::enr::{CombinedKey, EnrBuilder}, + rpc::methods::{MetaData, MetaDataV2}, + types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, + MessageId, NetworkGlobals, PeerId, +}; use slot_clock::SlotClock; use std::cmp; use std::iter::Iterator; @@ -19,8 +23,8 @@ use std::time::Duration; use tokio::runtime::Runtime; use tokio::sync::mpsc; use types::{ - test_utils::generate_deterministic_keypairs, Attestation, AttesterSlashing, MainnetEthSpec, - ProposerSlashing, SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + test_utils::generate_deterministic_keypairs, Attestation, AttesterSlashing, EthSpec, + MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, SignedVoluntaryExit, SubnetId, }; type E = MainnetEthSpec; @@ -67,9 +71,13 @@ impl Drop for TestRig { impl TestRig { pub fn new(chain_length: u64) -> Self { - let mut harness = BeaconChainHarness::new( + // This allows for testing voluntary exits without building out a massive chain. + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + + let harness = BeaconChainHarness::new( MainnetEthSpec, - None, + Some(spec), generate_deterministic_keypairs(VALIDATOR_COUNT), ); @@ -147,13 +155,7 @@ impl TestRig { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - // Changing this *after* the chain has been initialized is a bit cheeky, but it shouldn't - // cause issue. - // - // This allows for testing voluntary exits without building out a massive chain. 
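For context, the queues added to the processor above are bounded LIFOs drained in priority order (blocks first, then attestations, then sync contributions, then individual sync messages). A hedged sketch of that queue behaviour, not the actual `LifoQueue` implementation:

use std::collections::VecDeque;

struct LifoQueue<T> {
    deque: VecDeque<T>,
    max: usize,
}

impl<T> LifoQueue<T> {
    fn new(max: usize) -> Self {
        Self { deque: VecDeque::new(), max }
    }

    // Newest work is served first; the oldest item is dropped on overflow,
    // keeping the queue fresh when the node falls behind.
    fn push(&mut self, item: T) {
        if self.deque.len() == self.max {
            self.deque.pop_back();
        }
        self.deque.push_front(item);
    }

    fn pop(&mut self) -> Option<T> {
        self.deque.pop_front()
    }
}

fn main() {
    let mut q = LifoQueue::new(2);
    q.push("old");
    q.push("mid");
    q.push("new"); // "old" is dropped
    assert_eq!(q.pop(), Some("new"));
    assert_eq!(q.pop(), Some("mid"));
    assert_eq!(q.pop(), None);
}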
- harness.chain.spec.shard_committee_period = 2; - - let chain = Arc::new(harness.chain); + let chain = harness.chain; let (network_tx, _network_rx) = mpsc::unbounded_channel(); @@ -163,10 +165,11 @@ impl TestRig { let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); // Default metadata - let meta_data = MetaData { + let meta_data = MetaData::V2(MetaDataV2 { seq_number: SEQ_NUMBER, - attnets: EnrBitfield::::default(), - }; + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }); let enr_key = CombinedKey::generate_secp256k1(); let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); let network_globals = Arc::new(NetworkGlobals::new( diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 0040a996f8b..4c71e3ca0dc 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -3,6 +3,7 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::{ attestation_verification::{Error as AttnError, SignatureVerifiedAttestation}, observed_operations::ObservationOutcome, + sync_committee_verification::Error as SyncCommitteeError, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, }; @@ -14,7 +15,8 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -688,6 +690,131 @@ impl Worker { } } + /// Process the sync committee signature received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. + /// - Attempt to add it to the naive aggregation pool. + /// + /// Raises a log if there are errors. + pub fn process_gossip_sync_committee_signature( + self, + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + _seen_timestamp: Duration, + ) { + let sync_signature = match self + .chain + .verify_sync_committee_message_for_gossip(sync_signature, subnet_id) + { + Ok(sync_signature) => sync_signature, + Err(e) => { + self.handle_sync_committee_message_failure( + peer_id, + message_id, + "sync_signature", + e, + ); + return; + } + }; + + /*TODO: + // Register the sync signature with any monitored validators. + self.chain + .validator_monitor + .read() + .register_gossip_unaggregated_attestation( + seen_timestamp, + attestation.indexed_attestation(), + &self.chain.slot_clock, + ); + */ + + // Indicate to the `Network` service that this message is valid and can be + // propagated on the gossip network. 
+ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL); + + if let Err(e) = self + .chain + .add_to_naive_sync_aggregation_pool(sync_signature) + { + debug!( + self.log, + "Sync committee signature invalid for agg pool"; + "reason" => ?e, + "peer" => %peer_id, + ) + } + + metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_MESSAGE_IMPORTED_TOTAL); + } + + /// Process the sync committee contribution received from the gossip network and: + /// + /// - If it passes gossip propagation criteria, tell the network thread to forward it. + /// - Attempt to add it to the block inclusion pool. + /// + /// Raises a log if there are errors. + pub fn process_sync_committee_contribution( + self, + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + _seen_timestamp: Duration, + ) { + let sync_contribution = match self + .chain + .verify_sync_contribution_for_gossip(sync_contribution) + { + Ok(sync_contribution) => sync_contribution, + Err(e) => { + // Report the failure to gossipsub + self.handle_sync_committee_message_failure( + peer_id, + message_id, + "sync_contribution", + e, + ); + return; + } + }; + + // Indicate to the `Network` service that this message is valid and can be + // propagated on the gossip network. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + /* TODO + // Register the attestation with any monitored validators. + self.chain + .validator_monitor + .read() + .register_gossip_aggregated_attestation( + seen_timestamp, + aggregate.aggregate(), + aggregate.indexed_attestation(), + &self.chain.slot_clock, + ); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL); + */ + + if let Err(e) = self + .chain + .add_contribution_to_block_inclusion_pool(sync_contribution) + { + debug!( + self.log, + "Sync contribution invalid for op pool"; + "reason" => ?e, + "peer" => %peer_id, + ) + } + metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL); + } + /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// network. fn handle_attestation_verification_failure( @@ -740,8 +867,7 @@ impl Worker { /* * The aggregate had no signatures and is therefore worthless. * - * Whilst we don't gossip this attestation, this act is **not** a clear - * violation of the spec nor indication of fault. + * This is forbidden by the p2p spec. Reject the message. * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); @@ -1079,4 +1205,242 @@ impl Worker { "type" => ?attestation_type, ); } + + /// Handle an error whilst verifying a `SyncCommitteeMessage` or `SignedContributionAndProof` from the + /// network. + pub fn handle_sync_committee_message_failure( + &self, + peer_id: PeerId, + message_id: MessageId, + message_type: &str, + error: SyncCommitteeError, + ) { + metrics::register_sync_committee_error(&error); + + match &error { + SyncCommitteeError::FutureSlot { .. } | SyncCommitteeError::PastSlot { .. } => { + /* + * These errors can be triggered by a mismatch between our slot and the peer. + * + * + * The peer has published an invalid consensus message, _only_ if we trust our own clock. 
+ */ + trace!( + self.log, + "Sync committee message is not within the last MAXIMUM_GOSSIP_CLOCK_DISPARITY slots"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + + // Peers that are slow or not to spec can spam us with these messages, draining our + // bandwidth. We therefore penalize these peers when they do this. + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + + // Do not propagate these messages. + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + SyncCommitteeError::EmptyAggregationBitfield => { + /* + * The aggregate had no signatures and is therefore worthless. + * + * This is forbidden by the p2p spec. Reject the message. + * + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::InvalidSelectionProof { .. } + | SyncCommitteeError::InvalidSignature => { + /* + * These errors are caused by invalid signatures. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::AggregatorNotInCommittee { .. } + | SyncCommitteeError::AggregatorPubkeyUnknown(_) => { + /* + * The aggregator is not in the committee for the given `ContributionAndProof`, OR + * the aggregator index was higher than any known validator index. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::SyncContributionAlreadyKnown(_) + | SyncCommitteeError::AggregatorAlreadyKnown(_) => { + /* + * The sync committee message has already been observed on the network or in + * a block. + * + * The peer is not necessarily faulty. + */ + trace!( + self.log, + "Sync committee message is already known"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return; + } + SyncCommitteeError::UnknownValidatorIndex(_) => { + /* + * The aggregator index (or similar field) was higher than the maximum + * possible number of validators. + * + * The peer has published an invalid consensus message. + */ + debug!( + self.log, + "Validator index too high"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::UnknownValidatorPubkey(_) => { + debug!( + self.log, + "Validator pubkey is unknown"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::InvalidSubnetId { received, expected } => { + /* + * The sync committee message was received on an incorrect subnet id.
+ */ + debug!( + self.log, + "Received sync committee message on incorrect subnet"; + "expected" => ?expected, + "received" => ?received, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::Invalid(_) => { + /* + * The sync committee message failed the state_processing verification. + * + * The peer has published an invalid consensus message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + SyncCommitteeError::PriorSyncCommitteeMessageKnown { .. } => { + /* + * We have already seen a sync committee message from this validator for this epoch. + * + * The peer is not necessarily faulty. + */ + debug!( + self.log, + "Prior sync committee message known"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); + // We still penalize the peer slightly. We don't want this to be a recurring + // behaviour. + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + + return; + } + SyncCommitteeError::BeaconChainError(e) => { + /* + * Lighthouse hit an unexpected error whilst processing the sync committee message. It + * should be impossible to trigger a `BeaconChainError` from the network, + * so we have a bug. + * + * It's not clear if the message is invalid/malicious. + */ + error!( + self.log, + "Unable to validate sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::BeaconStateError(e) => { + /* + * Lighthouse hit an unexpected error whilst processing the sync committee message. It + * should be impossible to trigger a `BeaconStateError` from the network, + * so we have a bug. + * + * It's not clear if the message is invalid/malicious. + */ + error!( + self.log, + "Unable to validate sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::ContributionError(e) => { + error!( + self.log, + "Error while processing sync contribution"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::SyncCommitteeError(e) => { + error!( + self.log, + "Error while processing sync committee message"; + "peer_id" => %peer_id, + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + // Penalize the peer slightly + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + } + SyncCommitteeError::ArithError(e) => { + /* + This would most likely imply incompatible configs or an invalid message. 
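+                (Editor's note, illustrative: e.g. a slot/epoch or committee-size
+                computation that overflows, which should not happen for messages built
+                against the same spec constants that we are running with.)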
+                */
+                error!(
+                    self.log,
+                    "Arithmetic error while processing sync committee message";
+                    "peer_id" => %peer_id,
+                    "error" => ?e,
+                );
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
+                self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
+            }
+            SyncCommitteeError::InvalidSubcommittee { .. } => {
+                /*
+                The subcommittee index is higher than `SYNC_COMMITTEE_SUBNET_COUNT`. This would imply
+                an invalid message.
+                */
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+                self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
+            }
+        }
+        debug!(
+            self.log,
+            "Invalid sync committee message from network";
+            "reason" => ?error,
+            "peer_id" => %peer_id,
+            "type" => ?message_type,
+        );
+    }
 }
diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs
index ec2cce0ee6f..58fec22d5f5 100644
--- a/beacon_node/network/src/beacon_processor/worker/mod.rs
+++ b/beacon_node/network/src/beacon_processor/worker/mod.rs
@@ -27,8 +27,8 @@ impl Worker {
     /// Creates a log if there is an internal error.
     fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
         self.sync_tx.send(message).unwrap_or_else(|e| {
-            debug!(self.log, "Could not send message to the sync service, likely shutdown";
-                "error" => %e)
+            debug!(self.log, "Could not send message to the sync service";
+                "error" => %e)
         });
     }
diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs
index 31bad7a3441..934442e1285 100644
--- a/beacon_node/network/src/lib.rs
+++ b/beacon_node/network/src/lib.rs
@@ -6,7 +6,6 @@ pub mod error;
 #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 pub mod service;
-mod attestation_service;
 mod beacon_processor;
 #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 mod metrics;
@@ -14,6 +13,7 @@ mod nat;
 mod persisted_dht;
 mod router;
 mod status;
+mod subnet_service;
 #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 mod sync;
diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs
index bc0537e28e6..7ffce125422 100644
--- a/beacon_node/network/src/metrics.rs
+++ b/beacon_node/network/src/metrics.rs
@@ -1,4 +1,7 @@
-use beacon_chain::attestation_verification::Error as AttnError;
+use beacon_chain::{
+    attestation_verification::Error as AttnError,
+    sync_committee_verification::Error as SyncCommitteeError,
+};
 use eth2_libp2p::PubsubMessage;
 use eth2_libp2p::{
     types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash,
@@ -7,7 +10,10 @@ use fnv::FnvHashMap;
 pub use lighthouse_metrics::*;
 use std::{collections::HashMap, sync::Arc};
 use strum::AsStaticRef;
-use types::{subnet_id::subnet_id_to_string, EthSpec};
+use types::{
+    consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, subnet_id::subnet_id_to_string,
+    sync_subnet_id::sync_subnet_id_to_string, EthSpec,
+};

lazy_static! {
@@ -20,15 +26,27 @@ lazy_static! {
        &["protocol"]
    );

-    pub static ref GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
-        "gossipsub_subscribed_subnets",
-        "Subnets currently subscribed to",
+    pub static ref GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
+        "gossipsub_subscribed_attestation_subnets",
+        "Attestation subnets currently subscribed to",
+        &["subnet"]
+    );
+
+    pub static ref GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
+        "gossipsub_subscribed_sync_subnets",
+        "Sync subnets currently subscribed to",
        &["subnet"]
    );

-    pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
-        "gossipsub_peers_per_subnet_topic_count",
-        "Peers subscribed per subnet topic",
+    pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
+        "gossipsub_peers_per_attestation_subnet_topic_count",
+        "Peers subscribed per attestation subnet topic",
+        &["subnet"]
+    );
+
+    pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
+        "gossipsub_peers_per_sync_subnet_topic_count",
+        "Peers subscribed per sync subnet topic",
        &["subnet"]
    );

@@ -38,7 +56,13 @@ lazy_static! {
        &["topic_hash"]
    );

-    pub static ref MESH_PEERS_PER_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
+    pub static ref MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
+        "gossipsub_mesh_peers_per_attestation_subnet_topic",
+        "Mesh peers per attestation subnet topic",
+        &["subnet"]
+    );
+
+    pub static ref MESH_PEERS_PER_SYNC_SUBNET_TOPIC: Result<IntGaugeVec> = try_create_int_gauge_vec(
        "gossipsub_mesh_peers_per_subnet_topic",
        "Mesh peers per subnet topic",
        &["subnet"]
@@ -50,9 +74,15 @@ lazy_static! {
        &["topic_hash"]
    );

-    pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC: Result<GaugeVec> = try_create_float_gauge_vec(
-        "gossipsub_avg_peer_score_per_subnet_topic",
-        "Average peer's score per subnet topic",
+    pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC: Result<GaugeVec> = try_create_float_gauge_vec(
+        "gossipsub_avg_peer_score_per_attestation_subnet_topic",
+        "Average peer's score per attestation subnet topic",
+        &["subnet"]
+    );
+
+    pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC: Result<GaugeVec> = try_create_float_gauge_vec(
+        "gossipsub_avg_peer_score_per_sync_subnet_topic",
+        "Average peer's score per sync committee subnet topic",
        &["subnet"]
    );

@@ -133,6 +163,14 @@ lazy_static! {
        "gossipsub_aggregated_attestations_rx_total",
        "Count of gossip aggregated attestations received"
    );
+    pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_RX: Result<IntCounter> = try_create_int_counter(
+        "gossipsub_sync_committee_message_rx_total",
+        "Count of gossip sync committee messages received"
+    );
+    pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX: Result<IntCounter> = try_create_int_counter(
+        "gossipsub_sync_committee_contribution_received_total",
+        "Count of gossip sync committee contributions received"
+    );

    /*
@@ -150,19 +188,35 @@ lazy_static! {
{ "gossipsub_aggregated_attestations_tx_total", "Count of gossip aggregated attestations transmitted" ); + pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_TX: Result = try_create_int_counter( + "gossipsub_sync_committee_message_tx_total", + "Count of gossip sync committee messages transmitted" + ); + pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX: Result = try_create_int_counter( + "gossipsub_sync_committee_contribution_tx_total", + "Count of gossip sync committee contributions transmitted" + ); /* * Attestation subnet subscriptions */ pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_subnet_subscriptions_total", - "Count of validator subscription requests." + "gossipsub_attestation_subnet_subscriptions_total", + "Count of validator attestation subscription requests." ); pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( "gossipsub_subnet_subscriptions_aggregator_total", "Count of validator subscription requests where the subscriber is an aggregator." ); + /* + * Sync committee subnet subscriptions + */ + pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( + "gossipsub_sync_committee_subnet_subscriptions_total", + "Count of validator sync committee subscription requests." + ); + /* * Gossip processor */ @@ -322,6 +376,33 @@ lazy_static! { "beacon_processor_aggregated_attestation_requeued_total", "Total number of aggregated attestations that referenced an unknown block and were re-queued." ); + // Sync committee messages. + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_sync_message_queue_total", + "Count of sync committee messages waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_message_verified_total", + "Total number of sync committee messages verified for gossip." + ); + pub static ref BEACON_PROCESSOR_SYNC_MESSAGE_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_message_imported_total", + "Total number of sync committee messages imported to fork choice, etc." + ); + // Sync contribution. + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_sync_contribution_queue_total", + "Count of sync committee contributions waiting to be processed." + ); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_contribution_verified_total", + "Total number of sync committee contributions verified for gossip." + ); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: Result = try_create_int_counter( + "beacon_processor_sync_contribution_imported_total", + "Total number of sync committee contributions imported to fork choice, etc." + ); + } lazy_static! { @@ -331,6 +412,12 @@ lazy_static! 
{ "Gossipsub attestation errors per error type", &["type"] ); + pub static ref GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE: Result = + try_create_int_counter_vec( + "gossipsub_sync_committee_errors_per_type", + "Gossipsub sync_committee errors per error type", + &["type"] + ); pub static ref INBOUND_LIBP2P_BYTES: Result = try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( @@ -402,6 +489,10 @@ pub fn register_attestation_error(error: &AttnError) { inc_counter_vec(&GOSSIP_ATTESTATION_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub fn register_sync_committee_error(error: &SyncCommitteeError) { + inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); +} + /// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. pub fn expose_publish_metrics(messages: &[PubsubMessage]) { for message in messages { @@ -417,6 +508,12 @@ pub fn expose_publish_metrics(messages: &[PubsubMessage]) { PubsubMessage::AggregateAndProofAttestation(_) => { inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX) } + PubsubMessage::SyncCommitteeMessage(_) => { + inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_TX) + } + PubsubMessage::SignedContributionAndProof(_) => { + inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX) + } _ => {} } } @@ -430,6 +527,10 @@ pub fn expose_receive_metrics(message: &PubsubMessage) { PubsubMessage::AggregateAndProofAttestation(_) => { inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX) } + PubsubMessage::SyncCommitteeMessage(_) => inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_RX), + PubsubMessage::SignedContributionAndProof(_) => { + inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX) + } _ => {} } } @@ -447,7 +548,10 @@ pub fn update_gossip_metrics( let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC .as_ref() .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC + let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC + .as_ref() + .map(|gauge| gauge.reset()); + let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC .as_ref() .map(|gauge| gauge.reset()); @@ -478,30 +582,50 @@ pub fn update_gossip_metrics( // reset the mesh peers, showing all subnets for subnet_id in 0..T::default_spec().attestation_subnet_count { let _ = get_int_gauge( - &MESH_PEERS_PER_SUBNET_TOPIC, + &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id)], ) .map(|v| v.set(0)); let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id)], ) .map(|v| v.set(0)); let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id)], ) .map(|v| v.set(0)); } + for subnet_id in 0..SYNC_COMMITTEE_SUBNET_COUNT { + let _ = get_int_gauge( + &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + + let _ = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id)], + ) + .map(|v| v.set(0)); + } + // Subnet topics subscribed to for topic_hash in gossipsub.topics() { if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { if let GossipKind::Attestation(subnet_id) = topic.kind() { let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC, + 
&GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) .map(|v| v.set(1)); @@ -519,7 +643,7 @@ pub fn update_gossip_metrics( match topic.kind() { GossipKind::Attestation(subnet_id) => { if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC, + &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.inc() @@ -528,13 +652,31 @@ pub fn update_gossip_metrics( // average peer scores if let Some(score) = gossipsub.peer_score(peer_id) { if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.add(score) }; } } + GossipKind::SyncCommitteeMessage(subnet_id) => { + if let Some(v) = get_int_gauge( + &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.inc() + }; + + // average peer scores + if let Some(score) = gossipsub.peer_score(peer_id) { + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.add(score) + }; + } + } kind => { // main topics if let Some(score) = gossipsub.peer_score(peer_id) { @@ -557,12 +699,21 @@ pub fn update_gossip_metrics( GossipKind::Attestation(subnet_id) => { // average peer scores if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC, + &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.set(v.get() / (*peers as f64)) }; } + GossipKind::SyncCommitteeMessage(subnet_id) => { + // average peer scores + if let Some(v) = get_gauge( + &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.set(v.get() / (*peers as f64)) + }; + } kind => { // main topics if let Some(v) = @@ -582,12 +733,20 @@ pub fn update_gossip_metrics( match topic.kind() { GossipKind::Attestation(subnet_id) => { if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_SUBNET_TOPIC, + &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, &[subnet_id_to_string(subnet_id.into())], ) { v.set(peers as i64) }; } + GossipKind::SyncCommitteeMessage(subnet_id) => { + if let Some(v) = get_int_gauge( + &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, + &[sync_subnet_id_to_string(subnet_id.into())], + ) { + v.set(peers as i64) + }; + } kind => { // main topics if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) { diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 8d9b1cd6247..5096a4bdc84 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -247,6 +247,31 @@ impl Router { self.processor .on_attester_slashing_gossip(id, peer_id, attester_slashing); } + PubsubMessage::SignedContributionAndProof(contribution_and_proof) => { + trace!( + self.log, + "Received sync committee aggregate"; + "peer_id" => %peer_id + ); + self.processor.on_sync_committee_contribution_gossip( + id, + peer_id, + *contribution_and_proof, + ); + } + PubsubMessage::SyncCommitteeMessage(sync_committtee_msg) => { + trace!( + self.log, + "Received sync committee signature"; + "peer_id" => %peer_id + ); + self.processor.on_sync_committee_signature_gossip( + id, + peer_id, + sync_committtee_msg.1, + sync_committtee_msg.0, + ); + } } } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 590d52da635..103ab85dc22 100644 --- 
a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -10,10 +10,11 @@ use slog::{debug, error, o, trace, warn}; use std::cmp; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, ChainSpec, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, SubnetId, + Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, }; /// Processes validated messages from the network. It relays necessary data to the syncing thread @@ -309,6 +310,36 @@ impl Processor { )) } + pub fn on_sync_committee_signature_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + sync_signature: SyncCommitteeMessage, + subnet_id: SyncSubnetId, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_signature( + message_id, + peer_id, + sync_signature, + subnet_id, + timestamp_now(), + )) + } + + pub fn on_sync_committee_contribution_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + sync_contribution: SignedContributionAndProof, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_sync_contribution( + message_id, + peer_id, + sync_contribution, + timestamp_now(), + )) + } + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { self.beacon_processor_send .try_send(work) @@ -328,10 +359,7 @@ pub(crate) fn status_message( beacon_chain: &BeaconChain, ) -> Result { let head_info = beacon_chain.head_info()?; - let genesis_validators_root = beacon_chain.genesis_validators_root; - - let fork_digest = - ChainSpec::compute_fork_digest(head_info.fork.current_version, genesis_validators_root); + let fork_digest = beacon_chain.enr_fork_id().fork_digest; Ok(StatusMessage { fork_digest, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 1f94c387d01..26c1e272fd0 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,38 +1,51 @@ use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; +use crate::subnet_service::SyncCommitteeService; +use crate::{error, metrics}; use crate::{ - attestation_service::{AttServiceMessage, AttestationService}, + subnet_service::{AttestationService, SubnetServiceMessage}, NetworkConfig, }; -use crate::{error, metrics}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, - Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, + Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, +}; +use eth2_libp2p::{ + types::{GossipEncoding, GossipTopic}, + BehaviourEvent, MessageId, NetworkGlobals, PeerId, }; -use eth2_libp2p::{types::GossipKind, BehaviourEvent, MessageId, NetworkGlobals, PeerId}; use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; +use futures::future::OptionFuture; use futures::prelude::*; -use slog::{debug, error, info, o, trace, warn}; -use std::{net::SocketAddr, sync::Arc, time::Duration}; +use slog::{crit, debug, error, info, o, trace, warn}; +use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; -use 
types::{EthSpec, RelativeEpoch, SubnetId, Unsigned, ValidatorSubscription};
+use types::{
+    EthSpec, ForkContext, ForkName, RelativeEpoch, SubnetId, SyncCommitteeSubscription,
+    SyncSubnetId, Unsigned, ValidatorSubscription,
+};

 mod tests;

 /// The interval (in seconds) that various network metrics will update.
 const METRIC_UPDATE_INTERVAL: u64 = 1;
+/// Delay after a fork where we unsubscribe from pre-fork topics.
+const UNSUBSCRIBE_DELAY_EPOCHS: u64 = 2;

 /// Types of messages that the network service can receive.
 #[derive(Debug)]
 pub enum NetworkMessage<T: EthSpec> {
     /// Subscribes a list of validators to specific slots for attestation duties.
-    Subscribe {
+    AttestationSubscribe {
         subscriptions: Vec<ValidatorSubscription>,
     },
+    /// Subscribes a list of validators to sync committee subnets for their duties.
+    SyncCommitteeSubscribe {
+        subscriptions: Vec<SyncCommitteeSubscription>,
+    },
     /// Subscribes the beacon node to the core gossipsub topics. We do this when we are either
     /// synced or close to the head slot.
     SubscribeCoreTopics,
@@ -97,6 +110,8 @@ pub struct NetworkService {
     libp2p: LibP2PService<T::EthSpec>,
     /// An attestation and subnet manager service.
     attestation_service: AttestationService<T>,
+    /// A sync committee subnet manager service.
+    sync_committee_service: SyncCommitteeService<T>,
     /// The receiver channel for lighthouse to communicate with the network service.
     network_recv: mpsc::UnboundedReceiver<NetworkMessage<T::EthSpec>>,
     /// The sending channel for the network service to send messages to be routed throughout
@@ -113,7 +128,9 @@ pub struct NetworkService {
     /// update the UDP socket of discovery if the UPnP mappings get established.
     discovery_auto_update: bool,
     /// A delay that expires when a new fork takes place.
-    next_fork_update: Option<Sleep>,
+    next_fork_update: Pin<Box<OptionFuture<Sleep>>>,
+    /// A delay that expires when we need to unsubscribe from old fork topics.
+    next_unsubscribe: Pin<Box<OptionFuture<Sleep>>>,
     /// Subscribe to all the subnets once synced.
     subscribe_all_subnets: bool,
     /// A timer for updating various network metrics.
     metrics_update: tokio::time::Interval,
     /// gossipsub_parameter_update timer
     gossipsub_parameter_update: tokio::time::Interval,
     /// The logger for the network service.
+    fork_context: Arc<ForkContext>,
     log: slog::Logger,
 }
@@ -158,7 +176,19 @@ impl NetworkService {
         let enr_fork_id = beacon_chain.enr_fork_id();

         // keep track of when our fork_id needs to be updated
-        let next_fork_update = next_fork_delay(&beacon_chain);
+        let next_fork_update = Box::pin(next_fork_delay(&beacon_chain).into());
+        let next_unsubscribe = Box::pin(None.into());
+
+        let current_slot = beacon_chain
+            .slot()
+            .unwrap_or(beacon_chain.spec.genesis_slot);
+
+        // Create a fork context for the given config and genesis validators root
+        let fork_context = Arc::new(ForkContext::new::<T::EthSpec>(
+            current_slot,
+            beacon_chain.genesis_validators_root,
+            &beacon_chain.spec,
+        ));

         // launch libp2p service
         let (network_globals, mut libp2p) = LibP2PService::new(
@@ -166,6 +196,7 @@
             config,
             enr_fork_id,
             &network_log,
+            fork_context.clone(),
             &beacon_chain.spec,
         )
         .await?;
@@ -193,10 +224,14 @@
             network_log.clone(),
         )?;

-        // attestation service
+        // attestation subnet service
         let attestation_service =
             AttestationService::new(beacon_chain.clone(), config, &network_log);

+        // sync committee subnet service
+        let sync_committee_service =
+            SyncCommitteeService::new(beacon_chain.clone(), config, &network_log);
+
         // create a timer for updating network metrics
         let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL));
@@ -209,6 +244,7 @@
             beacon_chain,
             libp2p,
             attestation_service,
+            sync_committee_service,
             network_recv,
             router_send,
             store,
@@ -216,9 +252,11 @@
             upnp_mappings: (None, None),
             discovery_auto_update: config.discv5_config.enr_update,
             next_fork_update,
+            next_unsubscribe,
             subscribe_all_subnets: config.subscribe_all_subnets,
             metrics_update,
             gossipsub_parameter_update,
+            fork_context,
             log: network_log,
         };
@@ -226,6 +264,26 @@
         Ok((network_globals, network_send))
     }
+
+    /// Returns the required fork digests that gossipsub needs to subscribe to based on the current slot.
+    ///
+    /// For `current_slot < fork_slot`, this function returns both the pre-fork and post-fork
+    /// digests since we should be subscribed to post-fork topics before the fork.
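+    ///
+    /// (Editor's note, illustrative usage sketch, not part of the PR:
+    ///
+    ///     // before the Altair fork activates, but with it scheduled, we need
+    ///     // both topic sets:
+    ///     let digests = service.required_gossip_fork_digests();
+    ///     assert_eq!(digests.len(), 2); // genesis digest + Altair digest
+    ///
+    /// After the fork, only the Altair digest is returned.)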
+    pub fn required_gossip_fork_digests(&self) -> Vec<[u8; 4]> {
+        let fork_context = &self.fork_context;
+        match fork_context.current_fork() {
+            ForkName::Base => {
+                if fork_context.fork_exists(ForkName::Altair) {
+                    fork_context.all_fork_digests()
+                } else {
+                    vec![fork_context.genesis_context_bytes()]
+                }
+            }
+            ForkName::Altair => vec![fork_context
+                .to_context_bytes(ForkName::Altair)
+                .expect("Altair fork bytes should exist as it's initialized in ForkContext")],
+        }
+    }
 }

fn spawn_service(
@@ -363,42 +421,69 @@ fn spawn_service(
                        }
                        NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source),
                        NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source),
-                        NetworkMessage::Subscribe { subscriptions } => {
+                        NetworkMessage::AttestationSubscribe { subscriptions } => {
                            if let Err(e) = service
                                .attestation_service
                                .validator_subscriptions(subscriptions) {
-                                warn!(service.log, "Validator subscription failed"; "error" => e);
+                                warn!(service.log, "Attestation validator subscription failed"; "error" => e);
+                            }
+                        }
+                        NetworkMessage::SyncCommitteeSubscribe { subscriptions } => {
+                            if let Err(e) = service
+                                .sync_committee_service
+                                .validator_subscriptions(subscriptions) {
+                                warn!(service.log, "Sync committee validator subscription failed"; "error" => e);
                            }
                        }
                        NetworkMessage::SubscribeCoreTopics => {
-                            let mut subscribed_topics: Vec<GossipKind> = vec![];
-                            let already_subscribed = service.network_globals.gossipsub_subscriptions.read().clone();
-                            let already_subscribed = already_subscribed.iter().map(|x| x.kind()).collect::<HashSet<_>>();
-                            for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter().filter(|topic| already_subscribed.get(topic).is_none()) {
-                                if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
-                                    subscribed_topics.push(topic_kind.clone());
-                                } else {
-                                    warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind);
+                            let mut subscribed_topics: Vec<GossipTopic> = vec![];
+                            for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter() {
+                                for fork_digest in service.required_gossip_fork_digests() {
+                                    let topic = GossipTopic::new(topic_kind.clone(), GossipEncoding::default(), fork_digest);
+                                    if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) {
+                                        subscribed_topics.push(topic);
+                                    } else {
+                                        warn!(service.log, "Could not subscribe to topic"; "topic" => %topic);
+                                    }
                                }
                            }

-                            // if we are to subscribe to all subnets we do it here
+                            // If we are to subscribe to all subnets we do it here
                            if service.subscribe_all_subnets {
                                for subnet_id in 0..<T::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() {
-                                    let subnet_id = SubnetId::new(subnet_id);
-                                    let topic_kind = eth2_libp2p::types::GossipKind::Attestation(subnet_id);
-                                    if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
-                                        // Update the ENR bitfield.
- service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true); - subscribed_topics.push(topic_kind.clone()); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind); + let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); + // Update the ENR bitfield + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); + } + } } + for subnet_id in 0..<::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64() { + let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); + // Update the ENR bitfield + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); + } + } } } if !subscribed_topics.is_empty() { - info!(service.log, "Subscribed to topics"; "topics" => ?subscribed_topics); + info!( + service.log, + "Subscribed to topics"; + "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() + ); } } } @@ -406,19 +491,51 @@ fn spawn_service( // process any attestation service events Some(attestation_service_message) = service.attestation_service.next() => { match attestation_service_message { - AttServiceMessage::Subscribe(subnet_id) => { - service.libp2p.swarm.behaviour_mut().subscribe_to_subnet(subnet_id); + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().subscribe(topic); + } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); + } + } + } + // process any sync committee service events + Some(sync_committee_service_message) = service.sync_committee_service.next() => { + match sync_committee_service_message { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().subscribe(topic); + } } - AttServiceMessage::Unsubscribe(subnet_id) => { - service.libp2p.swarm.behaviour_mut().unsubscribe_from_subnet(subnet_id); + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in service.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), 
GossipEncoding::default(), fork_digest); + service.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } } - AttServiceMessage::EnrAdd(subnet_id) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true); + SubnetServiceMessage::EnrAdd(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); } - AttServiceMessage::EnrRemove(subnet_id) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, false); + SubnetServiceMessage::EnrRemove(subnet) => { + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); } - AttServiceMessage::DiscoverPeers(subnets_to_discover) => { + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); } } @@ -532,34 +649,57 @@ fn spawn_service( } } } - } + Some(_) = &mut service.next_fork_update => { + let new_enr_fork_id = service.beacon_chain.enr_fork_id(); + + let fork_context = &service.fork_context; + if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + info!( + service.log, + "Updating enr fork version"; + "old_fork" => ?fork_context.current_fork(), + "new_fork" => ?new_fork_name, + ); + fork_context.update_current_fork(*new_fork_name); + + service + .libp2p + .swarm + .behaviour_mut() + .update_fork_version(new_enr_fork_id.clone()); + // Reinitialize the next_fork_update + service.next_fork_update = Box::pin(next_fork_delay(&service.beacon_chain).into()); + + // Set the next_unsubscribe delay. + let epoch_duration = service.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); + let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); + service.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); + info!(service.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + } else { + crit!(service.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + } - if let Some(delay) = &service.next_fork_update { - if delay.is_elapsed() { - service - .libp2p - .swarm - .behaviour_mut() - .update_fork_version(service.beacon_chain.enr_fork_id()); - service.next_fork_update = next_fork_delay(&service.beacon_chain); + } + Some(_) = &mut service.next_unsubscribe => { + let new_enr_fork_id = service.beacon_chain.enr_fork_id(); + service.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + info!(service.log, "Unsubscribed from old fork topics"); + service.next_unsubscribe = Box::pin(None.into()); } } - metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone()); } }, "network"); } -/// Returns a `Sleep` that triggers shortly after the next change in the beacon chain fork version. +/// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. /// If there is no scheduled fork, `None` is returned. fn next_fork_delay( beacon_chain: &BeaconChain, ) -> Option { - beacon_chain.duration_to_next_fork().map(|until_fork| { - // Add a short time-out to start within the new fork period. 
- let delay = Duration::from_millis(200); - tokio::time::sleep_until(tokio::time::Instant::now() + until_fork + delay) - }) + beacon_chain + .duration_to_next_fork() + .map(|(_, until_fork)| tokio::time::sleep(until_fork)) } impl Drop for NetworkService { diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 02be935c1fc..db61d2a88cf 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -35,15 +35,13 @@ mod tests { fn test_dht_persistence() { let log = get_logger(false); - let beacon_chain = Arc::new( - BeaconChainHarness::new_with_store_config( - MinimalEthSpec, - None, - generate_deterministic_keypairs(8), - StoreConfig::default(), - ) - .chain, - ); + let beacon_chain = BeaconChainHarness::new_with_store_config( + MinimalEthSpec, + None, + generate_deterministic_keypairs(8), + StoreConfig::default(), + ) + .chain; let store = beacon_chain.store.clone(); diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 41cc990edf3..d4eeba57dc6 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,4 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use types::ChainSpec; use eth2_libp2p::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -13,10 +12,7 @@ pub trait ToStatusMessage { impl ToStatusMessage for BeaconChain { fn status_message(&self) -> Result { let head_info = self.head_info()?; - let genesis_validators_root = self.genesis_validators_root; - - let fork_digest = - ChainSpec::compute_fork_digest(head_info.fork.current_version, genesis_validators_root); + let fork_digest = self.enr_fork_id().fork_digest; Ok(StatusMessage { fork_digest, diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs similarity index 91% rename from beacon_node/network/src/attestation_service/mod.rs rename to beacon_node/network/src/subnet_service/attestation_subnets.rs index 09c0ff895e7..dcfd358974f 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -2,6 +2,7 @@ //! given time. It schedules subscriptions to shard subnets, requests peer discoveries and //! determines whether attestations should be aggregated and/or passed to the beacon node. +use super::SubnetServiceMessage; use std::collections::{HashMap, HashSet, VecDeque}; use std::pin::Pin; use std::sync::Arc; @@ -13,16 +14,13 @@ use rand::seq::SliceRandom; use slog::{debug, error, o, trace, warn}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::{NetworkConfig, SubnetDiscovery}; +use eth2_libp2p::{NetworkConfig, Subnet, SubnetDiscovery}; use hashset_delay::HashSetDelay; use slot_clock::SlotClock; use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; use crate::metrics; -#[cfg(test)] -mod tests; - /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. /// Subnet discovery query takes atmost 30 secs, 2 slots take 24s. @@ -30,7 +28,6 @@ const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random /// gossip topics that we subscribed to due to the validator connection. 
const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; -// 30 mins at a 12s slot time /// The fraction of a slot that we subscribe to a subnet before the required slot. /// /// Note: The time is calculated as `time = seconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. @@ -39,46 +36,6 @@ const ADVANCE_SUBSCRIBE_TIME: u32 = 3; /// 36s at 12s slot time const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; -#[derive(Debug, Clone)] -pub enum AttServiceMessage { - /// Subscribe to the specified subnet id. - Subscribe(SubnetId), - /// Unsubscribe to the specified subnet id. - Unsubscribe(SubnetId), - /// Add the `SubnetId` to the ENR bitfield. - EnrAdd(SubnetId), - /// Remove the `SubnetId` from the ENR bitfield. - EnrRemove(SubnetId), - /// Discover peers for a list of `SubnetDiscovery`. - DiscoverPeers(Vec), -} - -/// Note: This `PartialEq` impl is for use only in tests. -/// The `DiscoverPeers` comparison is good enough for testing only. -#[cfg(test)] -impl PartialEq for AttServiceMessage { - fn eq(&self, other: &AttServiceMessage) -> bool { - match (self, other) { - (AttServiceMessage::Subscribe(a), AttServiceMessage::Subscribe(b)) => a == b, - (AttServiceMessage::Unsubscribe(a), AttServiceMessage::Unsubscribe(b)) => a == b, - (AttServiceMessage::EnrAdd(a), AttServiceMessage::EnrAdd(b)) => a == b, - (AttServiceMessage::EnrRemove(a), AttServiceMessage::EnrRemove(b)) => a == b, - (AttServiceMessage::DiscoverPeers(a), AttServiceMessage::DiscoverPeers(b)) => { - if a.len() != b.len() { - return false; - } - for i in 0..a.len() { - if a[i].subnet_id != b[i].subnet_id || a[i].min_ttl != b[i].min_ttl { - return false; - } - } - true - } - _ => false, - } - } -} - /// A particular subnet at a given slot. #[derive(PartialEq, Eq, Hash, Clone, Debug)] pub struct ExactSubnet { @@ -90,13 +47,13 @@ pub struct ExactSubnet { pub struct AttestationService { /// Queued events to return to the driving service. - events: VecDeque, + events: VecDeque, /// A reference to the beacon chain to process received attestations. - beacon_chain: Arc>, + pub(crate) beacon_chain: Arc>, /// The collection of currently subscribed random subnets mapped to their expiry deadline. - random_subnets: HashSetDelay, + pub(crate) random_subnets: HashSetDelay, /// The collection of all currently subscribed subnets (long-lived **and** short-lived). subscriptions: HashSet, @@ -332,7 +289,7 @@ impl AttestationService { .duration_to_slot(exact_subnet.slot + 1) .map(|duration| std::time::Instant::now() + duration); Some(SubnetDiscovery { - subnet_id: exact_subnet.subnet_id, + subnet: Subnet::Attestation(exact_subnet.subnet_id), min_ttl, }) } else { @@ -349,7 +306,7 @@ impl AttestationService { if !discovery_subnets.is_empty() { self.events - .push_back(AttServiceMessage::DiscoverPeers(discovery_subnets)); + .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); } Ok(()) } @@ -474,8 +431,8 @@ impl AttestationService { // However, subscribing to random subnets ideally shouldn't happen very often (once in ~27 hours) and // this makes it easier to deterministically test the attestations service. 
        self.events
-            .push_back(AttServiceMessage::DiscoverPeers(vec![SubnetDiscovery {
-                subnet_id,
+            .push_back(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery {
+                subnet: Subnet::Attestation(subnet_id),
                min_ttl: None,
            }]));
@@ -484,11 +441,14 @@ impl AttestationService {
            self.subscriptions.insert(subnet_id);
            debug!(self.log, "Subscribing to random subnet"; "subnet_id" => ?subnet_id);
            self.events
-                .push_back(AttServiceMessage::Subscribe(subnet_id));
+                .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation(
+                    subnet_id,
+                )));
        }

        // add the subnet to the ENR bitfield
-        self.events.push_back(AttServiceMessage::EnrAdd(subnet_id));
+        self.events
+            .push_back(SubnetServiceMessage::EnrAdd(Subnet::Attestation(subnet_id)));
    }
 }
@@ -525,7 +485,9 @@ impl AttestationService {
            debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot.as_u64());
            self.subscriptions.insert(exact_subnet.subnet_id);
            self.events
-                .push_back(AttServiceMessage::Subscribe(exact_subnet.subnet_id));
+                .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation(
+                    exact_subnet.subnet_id,
+                )));
        }
    }
 }
@@ -544,7 +506,9 @@ impl AttestationService {
        self.subscriptions.remove(&exact_subnet.subnet_id);
        self.events
-            .push_back(AttServiceMessage::Unsubscribe(exact_subnet.subnet_id));
+            .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                exact_subnet.subnet_id,
+            )));
    }

    /// A random subnet has expired.
@@ -567,12 +531,16 @@ impl AttestationService {
            // we are not at capacity, unsubscribe from the current subnet.
            debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id);
            self.events
-                .push_back(AttServiceMessage::Unsubscribe(subnet_id));
+                .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                    subnet_id,
+                )));
        }

        // Remove the ENR bitfield bit and choose a new random on from the available subnets
        self.events
-            .push_back(AttServiceMessage::EnrRemove(subnet_id));
+            .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation(
+                subnet_id,
+            )));
        // Subscribe to a new random subnet
        self.subscribe_to_random_subnets(1);
    }
@@ -606,19 +574,23 @@ impl AttestationService {
            .any(|s| s.subnet_id == *subnet_id)
        {
            self.events
-                .push_back(AttServiceMessage::Unsubscribe(*subnet_id));
+                .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
+                    *subnet_id,
+                )));
        }
        // as the long lasting subnet subscription is being removed, remove the subnet_id from
        // the ENR bitfield
        self.events
-            .push_back(AttServiceMessage::EnrRemove(*subnet_id));
+            .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation(
+                *subnet_id,
+            )));
        self.random_subnets.remove(subnet_id);
    }
 }

impl<T: BeaconChainTypes> Stream for AttestationService<T> {
-    type Item = AttServiceMessage;
+    type Item = SubnetServiceMessage;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // update the waker if needed
diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs
new file mode 100644
index 00000000000..4df540d9b67
--- /dev/null
+++ b/beacon_node/network/src/subnet_service/mod.rs
@@ -0,0 +1,50 @@
+pub mod attestation_subnets;
+pub mod sync_subnets;
+
+use eth2_libp2p::{Subnet, SubnetDiscovery};
+
+pub use attestation_subnets::AttestationService;
+pub use sync_subnets::SyncCommitteeService;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Debug, Clone)]
+pub enum SubnetServiceMessage {
+    /// Subscribe to the specified subnet.
+    Subscribe(Subnet),
+    /// Unsubscribe from the specified subnet.
+    Unsubscribe(Subnet),
+    /// Add the `Subnet` to the ENR bitfield.
+    EnrAdd(Subnet),
+    /// Remove the `Subnet` from the ENR bitfield.
+    EnrRemove(Subnet),
+    /// Discover peers for a list of `SubnetDiscovery`.
+    DiscoverPeers(Vec<SubnetDiscovery>),
+}
+
+/// Note: This `PartialEq` impl is for use only in tests.
+/// The `DiscoverPeers` comparison is good enough for testing only.
+#[cfg(test)]
+impl PartialEq for SubnetServiceMessage {
+    fn eq(&self, other: &SubnetServiceMessage) -> bool {
+        match (self, other) {
+            (SubnetServiceMessage::Subscribe(a), SubnetServiceMessage::Subscribe(b)) => a == b,
+            (SubnetServiceMessage::Unsubscribe(a), SubnetServiceMessage::Unsubscribe(b)) => a == b,
+            (SubnetServiceMessage::EnrAdd(a), SubnetServiceMessage::EnrAdd(b)) => a == b,
+            (SubnetServiceMessage::EnrRemove(a), SubnetServiceMessage::EnrRemove(b)) => a == b,
+            (SubnetServiceMessage::DiscoverPeers(a), SubnetServiceMessage::DiscoverPeers(b)) => {
+                if a.len() != b.len() {
+                    return false;
+                }
+                for i in 0..a.len() {
+                    if a[i].subnet != b[i].subnet || a[i].min_ttl != b[i].min_ttl {
+                        return false;
+                    }
+                }
+                true
+            }
+            _ => false,
+        }
+    }
+}
diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs
new file mode 100644
index 00000000000..4162fdd167b
--- /dev/null
+++ b/beacon_node/network/src/subnet_service/sync_subnets.rs
@@ -0,0 +1,350 @@
+//! This service keeps track of which sync committee subnet the beacon node should be subscribed to at any
+//! given time. It schedules subscriptions to sync committee subnets and requests peer discoveries.
+
+use std::collections::{hash_map::Entry, HashMap, VecDeque};
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use futures::prelude::*;
+use slog::{debug, error, o, trace, warn};
+
+use super::SubnetServiceMessage;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2_libp2p::{NetworkConfig, Subnet, SubnetDiscovery};
+use hashset_delay::HashSetDelay;
+use slot_clock::SlotClock;
+use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId};
+
+use crate::metrics;
+
+/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the
+/// slot is less than this number, skip the peer discovery process.
+/// Subnet discovery query takes at most 30 secs, 2 slots take 24s.
+const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2;
+
+/// A particular subnet to remain subscribed to until a given epoch.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+pub struct ExactSubnet {
+    /// The `SyncSubnetId` associated with this subnet.
+    pub subnet_id: SyncSubnetId,
+    /// The epoch until which we need to stay subscribed to the subnet.
+    pub until_epoch: Epoch,
+}
+
+pub struct SyncCommitteeService<T: BeaconChainTypes> {
+    /// Queued events to return to the driving service.
+    events: VecDeque<SubnetServiceMessage>,
+
+    /// A reference to the beacon chain to process received sync committee messages.
+    pub(crate) beacon_chain: Arc<BeaconChain<T>>,
+
+    /// The collection of all currently subscribed subnets.
+    subscriptions: HashMap<SyncSubnetId, Epoch>,
+
+    /// A collection of timeouts for when to unsubscribe from a subnet.
+    unsubscriptions: HashSetDelay<SyncSubnetId>,
+
+    /// The waker for the current thread.
+    waker: Option<std::task::Waker>,
+
+    /// The discovery mechanism of lighthouse is disabled.
+    discovery_disabled: bool,
+
+    /// We are always subscribed to all subnets.
+    subscribe_all_subnets: bool,
+
+    /// The logger for the sync committee service.
+    log: slog::Logger,
+}
+
+impl<T: BeaconChainTypes> SyncCommitteeService<T> {
+    /* Public functions */
+
+    pub fn new(
+        beacon_chain: Arc<BeaconChain<T>>,
+        config: &NetworkConfig,
+        log: &slog::Logger,
+    ) -> Self {
+        let log = log.new(o!("service" => "sync_committee_service"));
+
+        let spec = &beacon_chain.spec;
+        let epoch_duration_secs =
+            beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch();
+        let default_timeout =
+            epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64());
+
+        SyncCommitteeService {
+            events: VecDeque::with_capacity(10),
+            beacon_chain,
+            subscriptions: HashMap::new(),
+            unsubscriptions: HashSetDelay::new(Duration::from_secs(default_timeout)),
+            waker: None,
+            subscribe_all_subnets: config.subscribe_all_subnets,
+            discovery_disabled: config.disable_discovery,
+            log,
+        }
+    }
+
+    /// Return count of all currently subscribed subnets.
+    #[cfg(test)]
+    pub fn subscription_count(&self) -> usize {
+        use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
+        if self.subscribe_all_subnets {
+            SYNC_COMMITTEE_SUBNET_COUNT as usize
+        } else {
+            self.subscriptions.len()
+        }
+    }
+
+    /// Processes a list of sync committee subscriptions.
+    ///
+    /// This will:
+    /// - Search for peers for required subnets.
+    /// - Request subscriptions to the required subnets.
+    /// - Build the timeouts for each of these events.
+    ///
+    /// This returns a result simply for the ergonomics of using ?. The result can be
+    /// safely dropped.
+    pub fn validator_subscriptions(
+        &mut self,
+        subscriptions: Vec<SyncCommitteeSubscription>,
+    ) -> Result<(), String> {
+        let mut subnets_to_discover = Vec::new();
+        for subscription in subscriptions {
+            metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS);
+            // NOTE: We assume all subscriptions have been verified before reaching this service
+
+            // Registers the validator with the subnet service. Unlike attestations, there
+            // are no long-lived random subnets here; we subscribe only for the given duties.
+            trace!(self.log,
+                "Sync committee subscription";
+                "subscription" => ?subscription,
+            );
+
+            let subnet_ids = match SyncSubnetId::compute_subnets_for_sync_committee::<T::EthSpec>(
+                &subscription.sync_committee_indices,
+            ) {
+                Ok(subnet_ids) => subnet_ids,
+                Err(e) => {
+                    warn!(self.log,
+                        "Failed to compute subnet id for sync committee subscription";
+                        "error" => ?e,
+                        "validator_index" => subscription.validator_index
+                    );
+                    continue;
+                }
+            };
+
+            for subnet_id in subnet_ids {
+                let exact_subnet = ExactSubnet {
+                    subnet_id,
+                    until_epoch: subscription.until_epoch,
+                };
+                subnets_to_discover.push(exact_subnet.clone());
+                if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) {
+                    warn!(self.log,
+                        "Subscription to sync subnet error";
+                        "error" => e,
+                        "validator_index" => subscription.validator_index,
+                    );
+                } else {
+                    trace!(self.log,
+                        "Subscribed to subnet for sync committee duties";
+                        "exact_subnet" => ?exact_subnet,
+                        "validator_index" => subscription.validator_index
+                    );
+                }
+            }
+        }
+        // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the
+        // required subnets.
+        if !self.discovery_disabled {
+            if let Err(e) = self.discover_peers_request(subnets_to_discover.iter()) {
+                warn!(self.log, "Discovery lookup request error"; "error" => e);
+            };
+        }
+
+        // pre-emptively wake the thread to check for new events
+        if let Some(waker) = &self.waker {
+            waker.wake_by_ref();
+        }
+        Ok(())
+    }
+
+    /* Internal private functions */
+
+    /// Checks if there are currently queued discovery requests and the time required to make the
+    /// request.
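+    ///
+    /// (Editor's note, illustrative timing: a discovery query can take up to ~30 s,
+    /// so with `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD = 2` a lookup is only queued when
+    /// the subscription's end slot is at least 2 slots away.)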
+    ///
+    /// If there is sufficient time, queues a peer discovery request for all the required subnets.
+    fn discover_peers_request<'a>(
+        &mut self,
+        exact_subnets: impl Iterator<Item = &'a ExactSubnet>,
+    ) -> Result<(), &'static str> {
+        let current_slot = self
+            .beacon_chain
+            .slot_clock
+            .now()
+            .ok_or("Could not get the current slot")?;
+
+        let slots_per_epoch = T::EthSpec::slots_per_epoch();
+
+        let discovery_subnets: Vec<SubnetDiscovery> = exact_subnets
+            .filter_map(|exact_subnet| {
+                let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch);
+                // check if there is enough time to perform a discovery lookup
+                if until_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) {
+                    // if the slot is more than an epoch away, add an event to start looking for peers
+                    // add one slot to ensure we keep the peer for the subscription slot
+                    let min_ttl = self
+                        .beacon_chain
+                        .slot_clock
+                        .duration_to_slot(until_slot + 1)
+                        .map(|duration| std::time::Instant::now() + duration);
+                    Some(SubnetDiscovery {
+                        subnet: Subnet::SyncCommittee(exact_subnet.subnet_id),
+                        min_ttl,
+                    })
+                } else {
+                    // We may want to check the global PeerInfo to see estimated timeouts for each
+                    // peer before they can be removed.
+                    warn!(self.log,
+                        "Not enough time for a discovery search";
+                        "subnet_id" => ?exact_subnet
+                    );
+                    None
+                }
+            })
+            .collect();
+
+        if !discovery_subnets.is_empty() {
+            self.events
+                .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets));
+        }
+        Ok(())
+    }
+
+    /// Adds a subscription event and an associated unsubscription event if required.
+    fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> {
+        // Return if we have subscribed to all subnets
+        if self.subscribe_all_subnets {
+            return Ok(());
+        }
+
+        // Return if we already have a subscription for exact_subnet
+        if self.subscriptions.get(&exact_subnet.subnet_id) == Some(&exact_subnet.until_epoch) {
+            return Ok(());
+        }
+
+        // Return if we already have a subscription set to expire later than the current request.
+        if let Some(until_epoch) = self.subscriptions.get(&exact_subnet.subnet_id) {
+            if *until_epoch >= exact_subnet.until_epoch {
+                return Ok(());
+            }
+        }
+
+        // initialise timing variables
+        let current_slot = self
+            .beacon_chain
+            .slot_clock
+            .now()
+            .ok_or("Could not get the current slot")?;
+
+        let slots_per_epoch = T::EthSpec::slots_per_epoch();
+        let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch);
+        // Calculate the duration to the unsubscription event.
+        let expected_end_subscription_duration = if current_slot >= until_slot {
+            warn!(
+                self.log,
+                "Sync committee subscription is past expiration";
+                "current_slot" => current_slot,
+                "exact_subnet" => ?exact_subnet,
+            );
+            return Ok(());
+        } else {
+            let slot_duration = self.beacon_chain.slot_clock.slot_duration();
+
+            // the duration until we no longer need this subscription. We assume a single slot is
+            // sufficient.
+            self.beacon_chain
+                .slot_clock
+                .duration_to_slot(until_slot)
+                .ok_or("Unable to determine duration to unsubscription slot")?
+ + slot_duration + }; + + if let Entry::Vacant(e) = self.subscriptions.entry(exact_subnet.subnet_id) { + // We are not currently subscribed and have no waiting subscription, create one + debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "until_epoch" => ?exact_subnet.until_epoch); + e.insert(exact_subnet.until_epoch); + self.events + .push_back(SubnetServiceMessage::Subscribe(Subnet::SyncCommittee( + exact_subnet.subnet_id, + ))); + + // add the subnet to the ENR bitfield + self.events + .push_back(SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee( + exact_subnet.subnet_id, + ))); + + // add an unsubscription event to remove ourselves from the subnet once completed + self.unsubscriptions + .insert_at(exact_subnet.subnet_id, expected_end_subscription_duration); + } else { + // We are already subscribed, extend the unsubscription duration + self.unsubscriptions + .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); + } + + Ok(()) + } + + /// A queued unsubscription is ready. + fn handle_unsubscriptions(&mut self, subnet_id: SyncSubnetId) { + debug!(self.log, "Unsubscribing from subnet"; "subnet" => *subnet_id); + + self.subscriptions.remove(&subnet_id); + self.events + .push_back(SubnetServiceMessage::Unsubscribe(Subnet::SyncCommittee( + subnet_id, + ))); + + self.events + .push_back(SubnetServiceMessage::EnrRemove(Subnet::SyncCommittee( + subnet_id, + ))); + } +} + +impl Stream for SyncCommitteeService { + type Item = SubnetServiceMessage; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // update the waker if needed + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // process any un-subscription events + match self.unsubscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // process any generated events + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); + } + + Poll::Pending + } +} diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs new file mode 100644 index 00000000000..6ad0837642c --- /dev/null +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -0,0 +1,573 @@ +use super::*; +use beacon_chain::{ + builder::{BeaconChainBuilder, Witness}, + eth1_chain::CachingEth1Backend, + BeaconChain, +}; +use eth2_libp2p::NetworkConfig; +use futures::prelude::*; +use genesis::{generate_deterministic_keypairs, interop_genesis_state}; +use lazy_static::lazy_static; +use slog::Logger; +use sloggers::{null::NullLoggerBuilder, Build}; +use slot_clock::{SlotClock, SystemTimeSlotClock}; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; +use store::config::StoreConfig; +use store::{HotColdDB, MemoryStore}; +use types::{ + CommitteeIndex, Epoch, EthSpec, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, + SyncSubnetId, ValidatorSubscription, +}; + +const SLOT_DURATION_MILLIS: u64 = 400; + +type TestBeaconChainType = Witness< + SystemTimeSlotClock, + CachingEth1Backend, + MainnetEthSpec, + MemoryStore, + MemoryStore, +>; + +pub struct TestBeaconChain { + chain: Arc>, +} + +impl TestBeaconChain { + pub fn new_with_system_clock() -> Self { + let 
diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs
new file mode 100644
index 00000000000..6ad0837642c
--- /dev/null
+++ b/beacon_node/network/src/subnet_service/tests/mod.rs
@@ -0,0 +1,573 @@
+use super::*;
+use beacon_chain::{
+    builder::{BeaconChainBuilder, Witness},
+    eth1_chain::CachingEth1Backend,
+    BeaconChain,
+};
+use eth2_libp2p::NetworkConfig;
+use futures::prelude::*;
+use genesis::{generate_deterministic_keypairs, interop_genesis_state};
+use lazy_static::lazy_static;
+use slog::Logger;
+use sloggers::{null::NullLoggerBuilder, Build};
+use slot_clock::{SlotClock, SystemTimeSlotClock};
+use std::sync::Arc;
+use std::time::{Duration, SystemTime};
+use store::config::StoreConfig;
+use store::{HotColdDB, MemoryStore};
+use types::{
+    CommitteeIndex, Epoch, EthSpec, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription,
+    SyncSubnetId, ValidatorSubscription,
+};
+
+const SLOT_DURATION_MILLIS: u64 = 400;
+
+type TestBeaconChainType = Witness<
+    SystemTimeSlotClock,
+    CachingEth1Backend<MainnetEthSpec>,
+    MainnetEthSpec,
+    MemoryStore<MainnetEthSpec>,
+    MemoryStore<MainnetEthSpec>,
+>;
+
+pub struct TestBeaconChain {
+    chain: Arc<BeaconChain<TestBeaconChainType>>,
+}
+
+impl TestBeaconChain {
+    pub fn new_with_system_clock() -> Self {
+        let spec = MainnetEthSpec::default_spec();
+
+        let keypairs = generate_deterministic_keypairs(1);
+
+        let log = get_logger();
+        let store =
+            HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap();
+
+        let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
+
+        let chain = Arc::new(
+            BeaconChainBuilder::new(MainnetEthSpec)
+                .logger(log.clone())
+                .custom_spec(spec.clone())
+                .store(Arc::new(store))
+                .genesis_state(
+                    interop_genesis_state::<MainnetEthSpec>(&keypairs, 0, &spec)
+                        .expect("should generate interop state"),
+                )
+                .expect("should build state using recent genesis")
+                .dummy_eth1_backend()
+                .expect("should build dummy backend")
+                .slot_clock(SystemTimeSlotClock::new(
+                    Slot::new(0),
+                    Duration::from_secs(recent_genesis_time()),
+                    Duration::from_millis(SLOT_DURATION_MILLIS),
+                ))
+                .shutdown_sender(shutdown_tx)
+                .monitor_validators(true, vec![], log)
+                .build()
+                .expect("should build"),
+        );
+        Self { chain }
+    }
+}
+
+pub fn recent_genesis_time() -> u64 {
+    SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .unwrap()
+        .as_secs()
+}
+
+fn get_logger() -> Logger {
+    NullLoggerBuilder.build().expect("logger should build")
+}
+
+lazy_static! {
+    static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock();
+}
+
+fn get_attestation_service() -> AttestationService<TestBeaconChainType> {
+    let log = get_logger();
+    let config = NetworkConfig::default();
+
+    let beacon_chain = CHAIN.chain.clone();
+
+    AttestationService::new(beacon_chain, &config, &log)
+}
+
+fn get_sync_committee_service() -> SyncCommitteeService<TestBeaconChainType> {
+    let log = get_logger();
+    let config = NetworkConfig::default();
+
+    let beacon_chain = CHAIN.chain.clone();
+
+    SyncCommitteeService::new(beacon_chain, &config, &log)
+}
+
+// Gets a number of events from the subscription service, returning whatever was collected so
+// far if it times out after the given number of slots.
+async fn get_events<S: Stream<Item = SubnetServiceMessage> + Unpin>(
+    stream: &mut S,
+    num_events: Option<usize>,
+    num_slots_before_timeout: u32,
+) -> Vec<SubnetServiceMessage> {
+    let mut events = Vec::new();
+
+    let collect_stream_fut = async {
+        loop {
+            if let Some(result) = stream.next().await {
+                events.push(result);
+                if let Some(num) = num_events {
+                    if events.len() == num {
+                        return;
+                    }
+                }
+            }
+        }
+    };
+
+    tokio::select! {
+        _ = collect_stream_fut => events,
+        _ = tokio::time::sleep(
+            Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout,
+        ) => events
+    }
+}
+
+mod attestation_service {
+    use super::*;
+
+    fn get_subscription(
+        validator_index: u64,
+        attestation_committee_index: CommitteeIndex,
+        slot: Slot,
+        committee_count_at_slot: u64,
+    ) -> ValidatorSubscription {
+        let is_aggregator = true;
+        ValidatorSubscription {
+            validator_index,
+            attestation_committee_index,
+            slot,
+            committee_count_at_slot,
+            is_aggregator,
+        }
+    }
+
+    fn get_subscriptions(
+        validator_count: u64,
+        slot: Slot,
+        committee_count_at_slot: u64,
+    ) -> Vec<ValidatorSubscription> {
+        (0..validator_count)
+            .map(|validator_index| {
+                get_subscription(
+                    validator_index,
+                    validator_index,
+                    slot,
+                    committee_count_at_slot,
+                )
+            })
+            .collect()
+    }
+
+    #[tokio::test]
+    async fn subscribe_current_slot_wait_for_unsubscribe() {
+        // subscription config
+        let validator_index = 1;
+        let committee_index = 1;
+        // Keep a low subscription slot so that there are no additional subnet discovery events.
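`get_events` above is a bounded collector: it resolves early once `num_events` items have arrived, and otherwise returns whatever was gathered when the slot-based deadline elapses. The same shape can be written against any stream with `tokio::time::timeout`; a small sketch follows, where the 400 ms figure mirrors `SLOT_DURATION_MILLIS` above.

    use std::time::Duration;

    use futures::{stream, Stream, StreamExt};

    /// Collect up to `max` items, giving up after `deadline`.
    async fn collect_with_deadline<S: Stream + Unpin>(
        mut s: S,
        max: usize,
        deadline: Duration,
    ) -> Vec<S::Item> {
        let mut out = Vec::new();
        // On timeout the partially filled `out` is simply returned.
        let _ = tokio::time::timeout(deadline, async {
            while out.len() < max {
                match s.next().await {
                    Some(item) => out.push(item),
                    None => break,
                }
            }
        })
        .await;
        out
    }

    #[tokio::main]
    async fn main() {
        let s = stream::iter(vec![1, 2, 3]);
        // Three slots of 400 ms, as in the tests.
        let events = collect_with_deadline(s, 2, Duration::from_millis(400) * 3).await;
        assert_eq!(events, vec![1, 2]);
    }

(The test setup continues below with `subscription_slot = 0`, as the comment above notes.)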
+ let subscription_slot = 0; + let committee_count = 1; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = vec![get_subscription( + validator_index, + committee_index, + current_slot + Slot::new(subscription_slot), + committee_count, + )]; + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // not enough time for peer discovery, just subscribe, unsubscribe + let subnet_id = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot), + committee_index, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + let expected = vec![ + SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id)), + SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id)), + ]; + + // Wait for 1 slot duration to get the unsubscription event + let events = get_events( + &mut attestation_service, + Some(5), + (MainnetEthSpec::slots_per_epoch() * 3) as u32, + ) + .await; + matches::assert_matches!( + events[..3], + [ + SubnetServiceMessage::DiscoverPeers(_), + SubnetServiceMessage::Subscribe(_any1), + SubnetServiceMessage::EnrAdd(_any3) + ] + ); + + // If the long lived and short lived subnets are the same, there should be no more events + // as we don't resubscribe already subscribed subnets. + if !attestation_service.random_subnets.contains(&subnet_id) { + assert_eq!(expected[..], events[3..]); + } + // Should be subscribed to only 1 long lived subnet after unsubscription. + assert_eq!(attestation_service.subscription_count(), 1); + } + + /// Test to verify that we are not unsubscribing to a subnet before a required subscription. + #[tokio::test] + async fn test_same_subnet_unsubscription() { + // subscription config + let validator_index = 1; + let committee_count = 1; + + // Makes 2 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). 
+ let subscription_slot1 = 0; + let subscription_slot2 = 1; + let com1 = 1; + let com2 = 0; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let sub1 = get_subscription( + validator_index, + com1, + current_slot + Slot::new(subscription_slot1), + committee_count, + ); + + let sub2 = get_subscription( + validator_index, + com2, + current_slot + Slot::new(subscription_slot2), + committee_count, + ); + + let subnet_id1 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot1), + com1, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + + let subnet_id2 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot2), + com2, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + + // Assert that subscriptions are different but their subnet is the same + assert_ne!(sub1, sub2); + assert_eq!(subnet_id1, subnet_id2); + + // submit the subscriptions + attestation_service + .validator_subscriptions(vec![sub1, sub2]) + .unwrap(); + + // Unsubscription event should happen at slot 2 (since subnet id's are the same, unsubscription event should be at higher slot + 1) + // Get all events for 1 slot duration (unsubscription event should happen after 2 slot durations). + let events = get_events(&mut attestation_service, None, 1).await; + matches::assert_matches!( + events[..3], + [ + SubnetServiceMessage::DiscoverPeers(_), + SubnetServiceMessage::Subscribe(_any1), + SubnetServiceMessage::EnrAdd(_any3) + ] + ); + + let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); + + // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. + if !attestation_service.random_subnets.contains(&subnet_id1) { + assert_eq!(expected, events[3]); + assert_eq!(attestation_service.subscription_count(), 2); + } else { + assert_eq!(attestation_service.subscription_count(), 1); + } + + // Get event for 1 more slot duration, we should get the unsubscribe event now. + let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; + + // If the long lived and short lived subnets are different, we should get an unsubscription event. + if !attestation_service.random_subnets.contains(&subnet_id1) { + assert_eq!( + [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( + subnet_id1 + ))], + unsubscribe_event[..] + ); + } + + // Should be subscribed to only 1 long lived subnet after unsubscription. 
+ assert_eq!(attestation_service.subscription_count(), 1); + } + + #[tokio::test] + async fn subscribe_all_random_subnets() { + let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; + let subscription_slot = 10; + let subscription_count = attestation_subnet_count; + let committee_count = 1; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = get_subscriptions( + subscription_count, + current_slot + subscription_slot, + committee_count, + ); + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + let events = get_events(&mut attestation_service, None, 3).await; + let mut discover_peer_count = 0; + let mut enr_add_count = 0; + let mut unexpected_msg_count = 0; + + for event in &events { + match event { + SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, + SubnetServiceMessage::Subscribe(_any_subnet) => {} + SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, + _ => unexpected_msg_count += 1, + } + } + + // The bulk discovery request length should be equal to validator_count + let bulk_discovery_event = events.last().unwrap(); + if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event { + assert_eq!(d.len(), attestation_subnet_count as usize); + } else { + panic!("Unexpected event {:?}", bulk_discovery_event); + } + + // 64 `DiscoverPeer` requests of length 1 corresponding to random subnets + // and 1 `DiscoverPeer` request corresponding to bulk subnet discovery. + assert_eq!(discover_peer_count, subscription_count + 1); + assert_eq!(attestation_service.subscription_count(), 64); + assert_eq!(enr_add_count, 64); + assert_eq!(unexpected_msg_count, 0); + // test completed successfully + } + + #[tokio::test] + async fn subscribe_all_random_subnets_plus_one() { + let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; + let subscription_slot = 10; + // the 65th subscription should result in no more messages than the previous scenario + let subscription_count = attestation_subnet_count + 1; + let committee_count = 1; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let subscriptions = get_subscriptions( + subscription_count, + current_slot + subscription_slot, + committee_count, + ); + + // submit the subscriptions + attestation_service + .validator_subscriptions(subscriptions) + .unwrap(); + + let events = get_events(&mut attestation_service, None, 3).await; + let mut discover_peer_count = 0; + let mut enr_add_count = 0; + let mut unexpected_msg_count = 0; + + for event in &events { + match event { + SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, + SubnetServiceMessage::Subscribe(_any_subnet) => {} + SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, + _ => unexpected_msg_count += 1, + } + } + + // The bulk discovery request length shouldn't exceed max attestation_subnet_count + let bulk_discovery_event = events.last().unwrap(); + if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event { + assert_eq!(d.len(), attestation_subnet_count as usize); + } else { + panic!("Unexpected event {:?}", 
bulk_discovery_event); + } + // 64 `DiscoverPeer` requests of length 1 corresponding to random subnets + // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. + // For the 65th subscription, the call to `subscribe_to_random_subnets` is not made because we are at capacity. + assert_eq!(discover_peer_count, 64 + 1); + assert_eq!(attestation_service.subscription_count(), 64); + assert_eq!(enr_add_count, 64); + assert_eq!(unexpected_msg_count, 0); + } +} + +mod sync_committee_service { + use super::*; + + #[tokio::test] + async fn subscribe_and_unsubscribe() { + // subscription config + let validator_index = 1; + let until_epoch = Epoch::new(1); + let sync_committee_indices = vec![1]; + + // create the attestation service and subscriptions + let mut sync_committee_service = get_sync_committee_service(); + + let subscriptions = vec![SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch, + }]; + + // submit the subscriptions + sync_committee_service + .validator_subscriptions(subscriptions) + .unwrap(); + + let subnet_ids = SyncSubnetId::compute_subnets_for_sync_committee::( + &sync_committee_indices, + ) + .unwrap(); + let subnet_id = subnet_ids.iter().next().unwrap(); + + // Note: the unsubscription event takes 2 epochs (8 * 2 * 0.4 secs = 3.2 secs) + let events = get_events( + &mut sync_committee_service, + Some(5), + (MainnetEthSpec::slots_per_epoch() * 3) as u32, // Have some buffer time before getting 5 events + ) + .await; + assert_eq!( + events[..2], + [ + SubnetServiceMessage::Subscribe(Subnet::SyncCommittee(*subnet_id)), + SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee(*subnet_id)) + ] + ); + matches::assert_matches!( + events[2..], + [ + SubnetServiceMessage::DiscoverPeers(_), + SubnetServiceMessage::Unsubscribe(_), + SubnetServiceMessage::EnrRemove(_), + ] + ); + + // Should be unsubscribed at the end. 
+ assert_eq!(sync_committee_service.subscription_count(), 0); + } + + #[tokio::test] + async fn same_subscription_with_lower_until_epoch() { + // subscription config + let validator_index = 1; + let until_epoch = Epoch::new(2); + let sync_committee_indices = vec![1]; + + // create the attestation service and subscriptions + let mut sync_committee_service = get_sync_committee_service(); + + let subscriptions = vec![SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch, + }]; + + // submit the subscriptions + sync_committee_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // Get all immediate events (won't include unsubscriptions) + let events = get_events(&mut sync_committee_service, None, 1).await; + matches::assert_matches!( + events[..], + [ + SubnetServiceMessage::Subscribe(Subnet::SyncCommittee(_)), + SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee(_)), + SubnetServiceMessage::DiscoverPeers(_), + ] + ); + + // Additional subscriptions which shouldn't emit any non-discovery events + // Event 1 is a duplicate of an existing subscription + // Event 2 is the same subscription with lower `until_epoch` than the existing subscription + let subscriptions = vec![ + SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch, + }, + SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch: until_epoch - 1, + }, + ]; + + // submit the subscriptions + sync_committee_service + .validator_subscriptions(subscriptions) + .unwrap(); + + // Get all immediate events (won't include unsubscriptions) + let events = get_events(&mut sync_committee_service, None, 1).await; + matches::assert_matches!(events[..], [SubnetServiceMessage::DiscoverPeers(_),]); + + // Should be unsubscribed at the end. 
+ assert_eq!(sync_committee_service.subscription_count(), 1); + } +} diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index f5869a7e8a0..00a1ab56048 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -89,7 +89,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { let genesis_state = eth2_network_config.beacon_state::()?; slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string()); - let enr_fork = spec.enr_fork_id( + let enr_fork = spec.enr_fork_id::( types::Slot::from(0u64), genesis_state.genesis_validators_root(), ); @@ -111,7 +111,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { // If we know of the ENR field, add it to the initial construction if let Some(enr_fork_bytes) = enr_fork { - builder.add_value("eth2", &enr_fork_bytes); + builder.add_value("eth2", enr_fork_bytes.as_slice()); } builder .build(&local_key) diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index ed563504b0b..3cdd8789a3e 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -40,13 +40,22 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { }; // construct the discv5 server - let mut discv5 = Discv5::new(config.local_enr, config.local_key, discv5_config).unwrap(); + let mut discv5 = + Discv5::new(config.local_enr.clone(), config.local_key, discv5_config).unwrap(); // If there are any bootnodes add them to the routing table for enr in config.boot_nodes { - info!(log, "Adding bootnode"; "address" => format!("{:?}", enr.udp_socket()), "peer_id" => enr.peer_id().to_string(), "node_id" => enr.node_id().to_string()); - if let Err(e) = discv5.add_enr(enr) { - slog::warn!(log, "Failed adding ENR"; "error" => e.to_string()); + info!( + log, + "Adding bootnode"; + "address" => ?enr.udp_socket(), + "peer_id" => enr.peer_id().to_string(), + "node_id" => enr.node_id().to_string() + ); + if enr != config.local_enr { + if let Err(e) = discv5.add_enr(enr) { + slog::warn!(log, "Failed adding ENR"; "error" => e.to_string()); + } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 08b4bcaead8..528360cf41c 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -12,7 +12,7 @@ pub mod lighthouse; pub mod lighthouse_vc; pub mod types; -use self::types::*; +use self::types::{Error as ResponseError, *}; use eth2_libp2p::PeerId; use futures::Stream; use futures_util::StreamExt; @@ -26,6 +26,9 @@ use std::fmt; use std::iter::Iterator; use std::time::Duration; +pub const V1: EndpointVersion = EndpointVersion(1); +pub const V2: EndpointVersion = EndpointVersion(2); + #[derive(Debug)] pub enum Error { /// The `reqwest` client raised an error. @@ -86,6 +89,7 @@ pub struct Timeouts { pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, + pub sync_duties: Duration, } impl Timeouts { @@ -96,6 +100,7 @@ impl Timeouts { liveness: timeout, proposal: timeout, proposer_duties: timeout, + sync_duties: timeout, } } } @@ -142,14 +147,14 @@ impl BeaconNodeHttpClient { } } - /// Return the path with the standard `/eth1/v1` prefix applied. - fn eth_path(&self) -> Result { + /// Return the path with the standard `/eth/vX` prefix applied. + fn eth_path(&self, version: EndpointVersion) -> Result { let mut path = self.server.full.clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("eth") - .push("v1"); + .push(&version.to_string()); Ok(path) } @@ -315,7 +320,7 @@ impl BeaconNodeHttpClient { /// /// May return a `404` if beacon chain genesis has not yet occurred. pub async fn get_beacon_genesis(&self) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -332,7 +337,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -351,7 +356,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -370,7 +375,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -390,7 +395,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -420,7 +425,7 @@ impl BeaconNodeHttpClient { ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -460,7 +465,7 @@ impl BeaconNodeHttpClient { index: Option, epoch: Option, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -487,6 +492,29 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/states/{state_id}/sync_committees?epoch` + pub async fn get_beacon_states_sync_committees( + &self, + state_id: StateId, + epoch: Option, + ) -> Result, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("sync_committees"); + + if let Some(epoch) = epoch { + path.query_pairs_mut() + .append_pair("epoch", &epoch.to_string()); + } + + self.get(path).await + } + /// `GET beacon/states/{state_id}/validators/{validator_id}` /// /// Returns `Ok(None)` on a 404 error. @@ -495,7 +523,7 @@ impl BeaconNodeHttpClient { state_id: StateId, validator_id: &ValidatorId, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -516,7 +544,7 @@ impl BeaconNodeHttpClient { slot: Option, parent_root: Option, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -543,7 +571,7 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -561,7 +589,7 @@ impl BeaconNodeHttpClient { &self, block: &SignedBeaconBlock, ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -574,14 +602,32 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `GET beacon/blocks` + /// `GET v2/beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { - let mut path = self.eth_path()?; + ) -> Result>>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `GET v1/beacon/blocks` (LEGACY) + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_v1( + &self, + block_id: BlockId, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -600,7 +646,7 @@ impl BeaconNodeHttpClient { block_id: BlockId, spec: &ChainSpec, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V2)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -621,7 +667,7 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -640,7 +686,7 @@ impl BeaconNodeHttpClient { &self, block_id: BlockId, ) -> Result>>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -657,7 +703,7 @@ impl BeaconNodeHttpClient { &self, attestations: &[Attestation], ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -665,15 +711,8 @@ impl BeaconNodeHttpClient { .push("pool") .push("attestations"); - let response = self - .client - .post(path) - .timeout(self.timeouts.attestation) - .json(attestations) - .send() - .await - .map_err(Error::Reqwest)?; - ok_or_indexed_error(response).await?; + self.post_with_timeout(path, &attestations, self.timeouts.attestation) + .await?; Ok(()) } @@ -684,7 +723,7 @@ impl BeaconNodeHttpClient { slot: Option, committee_index: Option, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -710,7 +749,7 @@ impl BeaconNodeHttpClient { &self, slashing: &AttesterSlashing, ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -727,7 +766,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_pool_attester_slashings( &self, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -743,7 +782,7 @@ impl BeaconNodeHttpClient { &self, slashing: &ProposerSlashing, ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
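The v2 block getter above returns a fork-versioned envelope rather than a bare `GenericResponse`: `ForkVersionedResponse` (defined in the types.rs hunk further below) is just `{ version, data }`, so a consumer can branch on the fork before interpreting `data`. A sketch of what decoding such an envelope looks like with serde; `ForkName` and `Block` here are simplified stand-ins for the real types in `consensus/types`:

    use serde::Deserialize;

    // Simplified stand-ins; the real `ForkName` has custom string forms.
    #[derive(Debug, Deserialize, PartialEq)]
    #[serde(rename_all = "lowercase")]
    enum ForkName {
        Phase0,
        Altair,
    }

    #[derive(Debug, Deserialize)]
    struct ForkVersionedResponse<T> {
        version: Option<ForkName>,
        data: T,
    }

    #[derive(Debug, Deserialize)]
    struct Block {
        slot: String,
    }

    fn main() {
        let json = r#"{"version":"altair","data":{"slot":"74240"}}"#;
        let resp: ForkVersionedResponse<Block> = serde_json::from_str(json).unwrap();
        assert_eq!(resp.version, Some(ForkName::Altair));
        assert_eq!(resp.data.slot, "74240");
    }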
@@ -760,7 +799,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_pool_proposer_slashings( &self, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -776,7 +815,7 @@ impl BeaconNodeHttpClient { &self, exit: &SignedVoluntaryExit, ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -793,7 +832,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_pool_voluntary_exits( &self, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -804,9 +843,44 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `POST beacon/pool/sync_committees` + pub async fn post_beacon_pool_sync_committee_signatures( + &self, + signatures: &[SyncCommitteeMessage], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("sync_committees"); + + self.post(path, &signatures).await?; + + Ok(()) + } + + /// `POST validator/contribution_and_proofs` + pub async fn post_validator_contribution_and_proofs( + &self, + signed_contributions: &[SignedContributionAndProof], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("contribution_and_proofs"); + + self.post(path, &signed_contributions).await?; + + Ok(()) + } + /// `GET config/fork_schedule` pub async fn get_config_fork_schedule(&self) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -818,7 +892,7 @@ impl BeaconNodeHttpClient { /// `GET config/spec` pub async fn get_config_spec(&self) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -832,7 +906,7 @@ impl BeaconNodeHttpClient { pub async fn get_config_deposit_contract( &self, ) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -844,7 +918,7 @@ impl BeaconNodeHttpClient { /// `GET node/version` pub async fn get_node_version(&self) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -856,7 +930,7 @@ impl BeaconNodeHttpClient { /// `GET node/identity` pub async fn get_node_identity(&self) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -868,7 +942,7 @@ impl BeaconNodeHttpClient { /// `GET node/syncing` pub async fn get_node_syncing(&self) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
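The new pool and contribution POSTs above go through the client's shared `post` helper, and the attestation endpoints earlier were collapsed onto `post_with_timeout` in the same way. The helper bodies are outside this hunk; a plausible sketch of such a helper, assuming it mirrors the hand-rolled `reqwest` code it replaced (the function name and `Error` type here are illustrative, and `reqwest` needs its `json` feature):

    use std::time::Duration;

    use reqwest::{Client, Response, StatusCode, Url};
    use serde::Serialize;

    #[derive(Debug)]
    enum Error {
        Reqwest(reqwest::Error),
        StatusCode(StatusCode),
    }

    /// POST `body` as JSON, failing on any non-200 response.
    async fn post_with_timeout<T: Serialize>(
        client: &Client,
        url: Url,
        body: &T,
        timeout: Duration,
    ) -> Result<Response, Error> {
        let response = client
            .post(url)
            .timeout(timeout)
            .json(body)
            .send()
            .await
            .map_err(Error::Reqwest)?;
        if response.status() == StatusCode::OK {
            Ok(response)
        } else {
            Err(Error::StatusCode(response.status()))
        }
    }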
@@ -880,7 +954,7 @@ impl BeaconNodeHttpClient { /// `GET node/health` pub async fn get_node_health(&self) -> Result { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -906,7 +980,7 @@ impl BeaconNodeHttpClient { &self, peer_id: PeerId, ) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -923,7 +997,7 @@ impl BeaconNodeHttpClient { states: Option<&[PeerState]>, directions: Option<&[PeerDirection]>, ) -> Result { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -953,7 +1027,7 @@ impl BeaconNodeHttpClient { /// `GET node/peer_count` pub async fn get_node_peer_count(&self) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -963,12 +1037,29 @@ impl BeaconNodeHttpClient { self.get(path).await } - /// `GET debug/beacon/states/{state_id}` + /// `GET v2/debug/beacon/states/{state_id}` pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { - let mut path = self.eth_path()?; + ) -> Result>>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("beacon") + .push("states") + .push(&state_id.to_string()); + + self.get_opt(path).await + } + + /// `GET v1/debug/beacon/states/{state_id}` (LEGACY) + pub async fn get_debug_beacon_states_v1( + &self, + state_id: StateId, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -987,7 +1078,7 @@ impl BeaconNodeHttpClient { state_id: StateId, spec: &ChainSpec, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1006,7 +1097,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_heads( &self, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1022,7 +1113,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1035,14 +1126,14 @@ impl BeaconNodeHttpClient { .await } - /// `GET validator/blocks/{slot}` + /// `GET v2/validator/blocks/{slot}` pub async fn get_validator_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { - let mut path = self.eth_path()?; + ) -> Result>, Error> { + let mut path = self.eth_path(V2)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1067,7 +1158,7 @@ impl BeaconNodeHttpClient { slot: Slot, committee_index: CommitteeIndex, ) -> Result, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
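All of these route builders map the unit error from `path_segments_mut()` into `Error::InvalidUrl`. That error can only occur for so-called cannot-be-a-base URLs, which carry an opaque, non-segmented path; a quick illustration with the `url` crate:

    use url::Url;

    fn main() {
        // Ordinary HTTP URLs always expose mutable path segments.
        let mut http = Url::parse("http://localhost:5052").unwrap();
        assert!(http.path_segments_mut().is_ok());

        // `mailto:` URLs have no path segments to mutate.
        let mut mailto = Url::parse("mailto:user@example.com").unwrap();
        assert!(mailto.path_segments_mut().is_err());
    }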
@@ -1087,7 +1178,7 @@ impl BeaconNodeHttpClient { slot: Slot, attestation_data_root: Hash256, ) -> Result>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1105,6 +1196,32 @@ impl BeaconNodeHttpClient { .await } + /// `GET validator/sync_committee_contribution` + pub async fn get_validator_sync_committee_contribution( + &self, + sync_committee_data: &SyncContributionData, + ) -> Result>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("sync_committee_contribution"); + + path.query_pairs_mut() + .append_pair("slot", &sync_committee_data.slot.to_string()) + .append_pair( + "beacon_block_root", + &format!("{:?}", sync_committee_data.beacon_block_root), + ) + .append_pair( + "subcommittee_index", + &sync_committee_data.subcommittee_index.to_string(), + ); + + self.get_opt(path).await + } + /// `POST lighthouse/liveness` pub async fn post_lighthouse_liveness( &self, @@ -1135,7 +1252,7 @@ impl BeaconNodeHttpClient { epoch: Epoch, indices: &[u64], ) -> Result>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1153,22 +1270,15 @@ impl BeaconNodeHttpClient { &self, aggregates: &[SignedAggregateAndProof], ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("validator") .push("aggregate_and_proofs"); - let response = self - .client - .post(path) - .timeout(self.timeouts.attestation) - .json(aggregates) - .send() - .await - .map_err(Error::Reqwest)?; - ok_or_indexed_error(response).await?; + self.post_with_timeout(path, &aggregates, self.timeouts.attestation) + .await?; Ok(()) } @@ -1178,7 +1288,7 @@ impl BeaconNodeHttpClient { &self, subscriptions: &[BeaconCommitteeSubscription], ) -> Result<(), Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1190,12 +1300,29 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST validator/sync_committee_subscriptions` + pub async fn post_validator_sync_committee_subscriptions( + &self, + subscriptions: &[SyncCommitteeSubscription], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("sync_committee_subscriptions"); + + self.post(path, &subscriptions).await?; + + Ok(()) + } + /// `GET events?topics` pub async fn get_events( &self, topic: &[EventTopic], ) -> Result, Error>>, Error> { - let mut path = self.eth_path()?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("events"); @@ -1219,31 +1346,39 @@ impl BeaconNodeHttpClient { Err(e) => Err(Error::Reqwest(e)), })) } -} -/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an -/// appropriate error message. 
-async fn ok_or_error(response: Response) -> Result<Response, Error> {
-    let status = response.status();
+    /// `POST validator/duties/sync/{epoch}`
+    pub async fn post_validator_duties_sync(
+        &self,
+        epoch: Epoch,
+        indices: &[u64],
+    ) -> Result<GenericResponse<Vec<SyncDuty>>, Error> {
+        let mut path = self.eth_path(V1)?;
 
-    if status == StatusCode::OK {
-        Ok(response)
-    } else if let Ok(message) = response.json().await {
-        Err(Error::ServerMessage(message))
-    } else {
-        Err(Error::StatusCode(status))
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("validator")
+            .push("duties")
+            .push("sync")
+            .push(&epoch.to_string());
+
+        self.post_with_timeout_and_response(path, &indices, self.timeouts.sync_duties)
+            .await
     }
 }
 
 /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an
-/// appropriate indexed error message.
-async fn ok_or_indexed_error(response: Response) -> Result<Response, Error> {
+/// appropriate error message.
+async fn ok_or_error(response: Response) -> Result<Response, Error> {
     let status = response.status();
 
     if status == StatusCode::OK {
         Ok(response)
     } else if let Ok(message) = response.json().await {
-        Err(Error::ServerIndexedMessage(message))
+        match message {
+            ResponseError::Message(message) => Err(Error::ServerMessage(message)),
+            ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)),
+        }
     } else {
         Err(Error::StatusCode(status))
     }
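`ok_or_error` now decodes a single body shape and dispatches on its variant: the `#[serde(untagged)]` enum (imported as `ResponseError`, defined in the types.rs hunk just below) makes serde try each variant in declaration order, so the indexed shape, which requires the extra `failures` field, is tried first. A self-contained sketch of that behaviour with simplified field types:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct ErrorMessage {
        code: u16,
        message: String,
    }

    #[derive(Debug, Deserialize)]
    struct Failure {
        index: u64,
        message: String,
    }

    #[derive(Debug, Deserialize)]
    struct IndexedErrorMessage {
        code: u16,
        message: String,
        failures: Vec<Failure>,
    }

    // Variant order matters for untagged enums: the more specific shape
    // (with `failures`) must come first, as in the patch.
    #[derive(Debug, Deserialize)]
    #[serde(untagged)]
    enum ResponseError {
        Indexed(IndexedErrorMessage),
        Message(ErrorMessage),
    }

    fn main() {
        let plain = r#"{"code":400,"message":"bad request"}"#;
        let indexed = r#"{"code":400,"message":"bad request",
                          "failures":[{"index":0,"message":"invalid signature"}]}"#;

        assert!(matches!(
            serde_json::from_str::<ResponseError>(plain).unwrap(),
            ResponseError::Message(_)
        ));
        assert!(matches!(
            serde_json::from_str::<ResponseError>(indexed).unwrap(),
            ResponseError::Indexed(_)
        ));
    }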
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index 264f6c58702..9a9f09ad66f 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -10,6 +10,14 @@ use std::fmt;
 use std::str::{from_utf8, FromStr};
 pub use types::*;
 
+/// An API error serializable to JSON.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum Error {
+    Indexed(IndexedErrorMessage),
+    Message(ErrorMessage),
+}
+
 /// An API error serializable to JSON.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct ErrorMessage {
@@ -43,6 +51,30 @@ impl Failure {
     }
 }
 
+/// The version of a single API endpoint, e.g. the `v1` in `/eth/v1/beacon/blocks`.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct EndpointVersion(pub u64);
+
+impl FromStr for EndpointVersion {
+    type Err = ();
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if let Some(version_str) = s.strip_prefix('v') {
+            u64::from_str(version_str)
+                .map(EndpointVersion)
+                .map_err(|_| ())
+        } else {
+            Err(())
+        }
+    }
+}
+
+impl std::fmt::Display for EndpointVersion {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
+        write!(fmt, "v{}", self.0)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct GenesisData {
     #[serde(with = "serde_utils::quoted_u64")]
@@ -179,6 +211,14 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> {
     }
 }
 
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
+// #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")]
+pub struct ForkVersionedResponse<T> {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<ForkName>,
+    pub data: T,
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
 pub struct RootData {
     pub root: Hash256,
@@ -378,6 +418,11 @@ pub struct CommitteesQuery {
     pub epoch: Option<Epoch>,
 }
 
+#[derive(Serialize, Deserialize)]
+pub struct SyncCommitteesQuery {
+    pub epoch: Option<Epoch>,
+}
+
 #[derive(Serialize, Deserialize)]
 pub struct AttestationPoolQuery {
     pub slot: Option<Slot>,
@@ -399,6 +444,20 @@ pub struct CommitteeData {
     pub validators: Vec<u64>,
 }
 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SyncCommitteeByValidatorIndices {
+    #[serde(with = "serde_utils::quoted_u64_vec")]
+    pub validators: Vec<u64>,
+    pub validator_aggregates: Vec<SyncSubcommittee>,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct SyncSubcommittee {
+    #[serde(with = "serde_utils::quoted_u64_vec")]
+    pub indices: Vec<u64>,
+}
+
 #[derive(Serialize, Deserialize)]
 pub struct HeadersQuery {
     pub slot: Option<Slot>,
@@ -445,6 +504,7 @@ pub struct MetaData {
     #[serde(with = "serde_utils::quoted_u64")]
     pub seq_number: u64,
     pub attnets: String,
+    pub syncnets: String,
 }
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
diff --git a/common/rest_types/Cargo.toml b/common/rest_types/Cargo.toml
deleted file mode 100644
index 3d4c70c1c33..00000000000
--- a/common/rest_types/Cargo.toml
+++ /dev/null
@@ -1,27 +0,0 @@
-[package]
-name = "rest_types"
-version = "0.2.0"
-authors = ["Sigma Prime <contact@sigmaprime.io>"]
-edition = "2018"
-
-[dependencies]
-types = { path = "../../consensus/types" }
-eth2_ssz_derive = "0.1.0"
-eth2_ssz = "0.1.2"
-eth2_hashing = "0.1.0"
-tree_hash = "0.1.0"
-state_processing = { path = "../../consensus/state_processing" }
-bls = { path = "../../crypto/bls" }
-serde = { version = "1.0.110", features = ["derive"] }
-rayon = "1.3.0"
-hyper = "0.14.4"
-tokio = { version = "1.1.0", features = ["sync"] }
-environment = { path = "../../lighthouse/environment" }
-store = { path = "../../beacon_node/store" }
-beacon_chain = { path = "../../beacon_node/beacon_chain" }
-serde_json = "1.0.52"
-serde_yaml = "0.8.11"
-
-[target.'cfg(target_os = "linux")'.dependencies]
-psutil = "3.1.0"
-procinfo = "0.4.2"
diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml
index 6f3d18d2802..cf420e01aa9 100644
--- a/consensus/types/Cargo.toml
+++ b/consensus/types/Cargo.toml
@@ -50,6 +50,7 @@ superstruct = "0.2.0"
 serde_json = "1.0.58"
 criterion = "0.3.3"
 beacon_chain = { path = "../../beacon_node/beacon_chain" }
+eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" }
 
 [features]
 default = ["sqlite", "legacy-arith"]
diff --git
a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 67caabd570f..27bed6f4973 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -65,101 +65,6 @@ impl BeaconBlock { } } - /// Return a block where the block has maximum size. - pub fn full(spec: &ChainSpec) -> BeaconBlock { - let header = BeaconBlockHeader { - slot: Slot::new(1), - proposer_index: 0, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body_root: Hash256::zero(), - }; - - let signed_header = SignedBeaconBlockHeader { - message: header, - signature: Signature::empty(), - }; - let indexed_attestation: IndexedAttestation = IndexedAttestation { - attesting_indices: VariableList::new(vec![ - 0_u64; - T::MaxValidatorsPerCommittee::to_usize() - ]) - .unwrap(), - data: AttestationData::default(), - signature: AggregateSignature::empty(), - }; - - let deposit_data = DepositData { - pubkey: PublicKeyBytes::empty(), - withdrawal_credentials: Hash256::zero(), - amount: 0, - signature: SignatureBytes::empty(), - }; - let proposer_slashing = ProposerSlashing { - signed_header_1: signed_header.clone(), - signed_header_2: signed_header, - }; - - let attester_slashing = AttesterSlashing { - attestation_1: indexed_attestation.clone(), - attestation_2: indexed_attestation, - }; - - let attestation: Attestation = Attestation { - aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize()) - .unwrap(), - data: AttestationData::default(), - signature: AggregateSignature::empty(), - }; - - let deposit = Deposit { - proof: FixedVector::from_elem(Hash256::zero()), - data: deposit_data, - }; - - let voluntary_exit = VoluntaryExit { - epoch: Epoch::new(1), - validator_index: 1, - }; - - let signed_voluntary_exit = SignedVoluntaryExit { - message: voluntary_exit, - signature: Signature::empty(), - }; - - // FIXME(altair): use an Altair block (they're bigger) - let mut block = BeaconBlockBase::::empty(spec); - for _ in 0..T::MaxProposerSlashings::to_usize() { - block - .body - .proposer_slashings - .push(proposer_slashing.clone()) - .unwrap(); - } - for _ in 0..T::MaxDeposits::to_usize() { - block.body.deposits.push(deposit.clone()).unwrap(); - } - for _ in 0..T::MaxVoluntaryExits::to_usize() { - block - .body - .voluntary_exits - .push(signed_voluntary_exit.clone()) - .unwrap(); - } - for _ in 0..T::MaxAttesterSlashings::to_usize() { - block - .body - .attester_slashings - .push(attester_slashing.clone()) - .unwrap(); - } - - for _ in 0..T::MaxAttestations::to_usize() { - block.body.attestations.push(attestation.clone()).unwrap(); - } - BeaconBlock::Base(block) - } - /// Custom SSZ decoder that takes a `ChainSpec` as context. pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { let slot_len = ::ssz_fixed_len(); @@ -244,6 +149,27 @@ impl BeaconBlock { } impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { + /// Returns the name of the fork pertaining to `self`. + /// + /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork + /// dictated by `self.slot()`. + pub fn fork_name(&self, spec: &ChainSpec) -> Result { + let fork_at_slot = spec.fork_name_at_slot::(self.slot()); + let object_fork = match self { + BeaconBlockRef::Base { .. } => ForkName::Base, + BeaconBlockRef::Altair { .. 
} => ForkName::Altair, + }; + + if fork_at_slot == object_fork { + Ok(object_fork) + } else { + Err(InconsistentFork { + fork_at_slot, + object_fork, + }) + } + } + /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. pub fn body(&self) -> BeaconBlockBodyRef<'a, T> { match self { @@ -314,10 +240,104 @@ impl BeaconBlockBase { }, } } + + /// Return a block where the block has maximum size. + pub fn full(spec: &ChainSpec) -> Self { + let header = BeaconBlockHeader { + slot: Slot::new(1), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }; + + let signed_header = SignedBeaconBlockHeader { + message: header, + signature: Signature::empty(), + }; + let indexed_attestation: IndexedAttestation = IndexedAttestation { + attesting_indices: VariableList::new(vec![ + 0_u64; + T::MaxValidatorsPerCommittee::to_usize() + ]) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + }; + + let deposit_data = DepositData { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::zero(), + amount: 0, + signature: SignatureBytes::empty(), + }; + let proposer_slashing = ProposerSlashing { + signed_header_1: signed_header.clone(), + signed_header_2: signed_header, + }; + + let attester_slashing = AttesterSlashing { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + + let attestation: Attestation = Attestation { + aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize()) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + }; + + let deposit = Deposit { + proof: FixedVector::from_elem(Hash256::zero()), + data: deposit_data, + }; + + let voluntary_exit = VoluntaryExit { + epoch: Epoch::new(1), + validator_index: 1, + }; + + let signed_voluntary_exit = SignedVoluntaryExit { + message: voluntary_exit, + signature: Signature::empty(), + }; + + let mut block = BeaconBlockBase::::empty(spec); + for _ in 0..T::MaxProposerSlashings::to_usize() { + block + .body + .proposer_slashings + .push(proposer_slashing.clone()) + .unwrap(); + } + for _ in 0..T::MaxDeposits::to_usize() { + block.body.deposits.push(deposit.clone()).unwrap(); + } + for _ in 0..T::MaxVoluntaryExits::to_usize() { + block + .body + .voluntary_exits + .push(signed_voluntary_exit.clone()) + .unwrap(); + } + for _ in 0..T::MaxAttesterSlashings::to_usize() { + block + .body + .attester_slashings + .push(attester_slashing.clone()) + .unwrap(); + } + + for _ in 0..T::MaxAttestations::to_usize() { + block.body.attestations.push(attestation.clone()).unwrap(); + } + block + } } impl BeaconBlockAltair { - /// Returns an empty block to be used during genesis. + /// Returns an empty Altair block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { slot: spec.genesis_slot, @@ -341,6 +361,36 @@ impl BeaconBlockAltair { }, } } + + /// Return an Altair block where the block has maximum size. 
+ pub fn full(spec: &ChainSpec) -> Self { + let base_block = BeaconBlockBase::full(spec); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockAltair { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings: base_block.body.attester_slashings, + attestations: base_block.body.attestations, + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + }, + } + } } #[cfg(test)] diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 54437969ca9..e62d53a1a2e 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -837,6 +837,32 @@ impl BeaconState { }) } + /// Get the sync committee duties for a list of validator indices. + /// + /// Will return a `SyncCommitteeNotKnown` error if the `epoch` is out of bounds with respect + /// to the current or next sync committee periods. + pub fn get_sync_committee_duties( + &self, + epoch: Epoch, + validator_indices: &[u64], + spec: &ChainSpec, + ) -> Result>, Error> { + let sync_committee = self.get_built_sync_committee(epoch, spec)?; + + validator_indices + .iter() + .map(|&validator_index| { + let pubkey = self.get_validator(validator_index as usize)?.pubkey; + + Ok(SyncDuty::from_sync_committee( + validator_index, + pubkey, + sync_committee, + )) + }) + .collect() + } + /// Get the canonical root of the `latest_block_header`, filling in its state root if necessary. /// /// It needs filling in on all slots where there isn't a skip. diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 7fbb4ea5f68..14320f871e4 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -148,26 +148,49 @@ impl ChainSpec { } /// Returns an `EnrForkId` for the given `slot`. - /// - /// Presently, we don't have any forks so we just ignore the slot. In the future this function - /// may return something different based upon the slot. - pub fn enr_fork_id(&self, _slot: Slot, genesis_validators_root: Hash256) -> EnrForkId { + pub fn enr_fork_id( + &self, + slot: Slot, + genesis_validators_root: Hash256, + ) -> EnrForkId { EnrForkId { - fork_digest: Self::compute_fork_digest( - self.genesis_fork_version, - genesis_validators_root, - ), - next_fork_version: self.genesis_fork_version, - next_fork_epoch: self.far_future_epoch, + fork_digest: self.fork_digest::(slot, genesis_validators_root), + next_fork_version: self.next_fork_version(), + next_fork_epoch: self + .next_fork_epoch::(slot) + .map(|(_, e)| e) + .unwrap_or(self.far_future_epoch), } } - /// Returns the epoch of the next scheduled change in the `fork.current_version`. + /// Returns the `ForkDigest` for the given slot. /// - /// There are no future forks scheduled so this function always returns `None`. This may not - /// always be the case in the future, though. - pub fn next_fork_epoch(&self) -> Option { - None + /// If `self.altair_fork_epoch == None`, then this function returns the genesis fork digest + /// otherwise, returns the fork digest based on the slot. 
+ pub fn fork_digest(&self, slot: Slot, genesis_validators_root: Hash256) -> [u8; 4] { + let fork_name = self.fork_name_at_slot::(slot); + Self::compute_fork_digest( + self.fork_version_for_name(fork_name), + genesis_validators_root, + ) + } + + /// Returns the `next_fork_version`. + /// + /// Since `next_fork_version = current_fork_version` if no future fork is planned, + /// this function returns `altair_fork_version` until the next fork is planned. + pub fn next_fork_version(&self) -> [u8; 4] { + self.altair_fork_version + } + + /// Returns the epoch of the next scheduled fork along with its corresponding `ForkName`. + /// + /// If no future forks are scheduled, this function returns `None`. + pub fn next_fork_epoch(&self, slot: Slot) -> Option<(ForkName, Epoch)> { + let current_fork_name = self.fork_name_at_slot::(slot); + let next_fork_name = current_fork_name.next_fork()?; + let fork_epoch = self.fork_epoch(next_fork_name)?; + Some((next_fork_name, fork_epoch)) } /// Returns the name of the fork which is active at `slot`. @@ -662,6 +685,8 @@ where #[cfg(test)] mod tests { use super::*; + use itertools::Itertools; + use safe_arith::SafeArith; #[test] fn test_mainnet_spec_can_be_constructed() { @@ -722,6 +747,33 @@ mod tests { } } } + + // Test that `next_fork_epoch` is consistent with the other functions. + #[test] + fn next_fork_epoch_consistency() { + type E = MainnetEthSpec; + let spec = ChainSpec::mainnet(); + + let mut last_fork_slot = Slot::new(0); + + for (_, fork) in ForkName::list_all().into_iter().tuple_windows() { + if let Some(fork_epoch) = spec.fork_epoch(fork) { + last_fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); + + // Fork is activated at non-zero epoch: check that `next_fork_epoch` returns + // the correct result. + if let Ok(prior_slot) = last_fork_slot.safe_sub(1) { + let (next_fork, next_fork_epoch) = + spec.next_fork_epoch::(prior_slot).unwrap(); + assert_eq!(fork, next_fork); + assert_eq!(spec.fork_epoch(fork).unwrap(), next_fork_epoch); + } + } else { + // Fork is not activated, check that `next_fork_epoch` returns `None`. + assert_eq!(spec.next_fork_epoch::(last_fork_slot), None); + } + } + } } #[cfg(test)] diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 3f59d2b905b..6e21edf9f6d 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -78,6 +78,8 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + * New in Altair */ type SyncCommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The number of `sync_committee` subnets. 
+ type SyncCommitteeSubnetCount: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -218,6 +220,7 @@ impl EthSpec for MainnetEthSpec { type MaxDeposits = U16; type MaxVoluntaryExits = U16; type SyncCommitteeSize = U512; + type SyncCommitteeSubnetCount = U4; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -250,6 +253,7 @@ impl EthSpec for MinimalEthSpec { params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, SubnetBitfieldLength, + SyncCommitteeSubnetCount, MaxValidatorsPerCommittee, GenesisEpoch, HistoricalRootsLimit, diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs new file mode 100644 index 00000000000..6da188570e8 --- /dev/null +++ b/consensus/types/src/fork_context.rs @@ -0,0 +1,91 @@ +use parking_lot::RwLock; + +use crate::{ChainSpec, EthSpec, ForkName, Hash256, Slot}; +use std::collections::HashMap; + +/// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork. +#[derive(Debug)] +pub struct ForkContext { + current_fork: RwLock, + fork_to_digest: HashMap, + digest_to_fork: HashMap<[u8; 4], ForkName>, +} + +impl ForkContext { + /// Creates a new `ForkContext` object by enumerating all enabled forks and computing their + /// fork digest. + /// + /// A fork is disabled in the `ChainSpec` if the activation slot corresponding to that fork is `None`. + pub fn new( + current_slot: Slot, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let mut fork_to_digest = vec![( + ForkName::Base, + ChainSpec::compute_fork_digest(spec.genesis_fork_version, genesis_validators_root), + )]; + + // Only add Altair to list of forks if it's enabled (i.e. spec.altair_fork_epoch != None) + if spec.altair_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Altair, + ChainSpec::compute_fork_digest(spec.altair_fork_version, genesis_validators_root), + )) + } + + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); + + let digest_to_fork = fork_to_digest + .clone() + .into_iter() + .map(|(k, v)| (v, k)) + .collect(); + + Self { + current_fork: RwLock::new(spec.fork_name_at_slot::(current_slot)), + fork_to_digest, + digest_to_fork, + } + } + + /// Returns `true` if the provided `fork_name` exists in the `ForkContext` object. + pub fn fork_exists(&self, fork_name: ForkName) -> bool { + self.fork_to_digest.contains_key(&fork_name) + } + + /// Returns the `current_fork`. + pub fn current_fork(&self) -> ForkName { + *self.current_fork.read() + } + + /// Updates the `current_fork` field to a new fork. + pub fn update_current_fork(&self, new_fork: ForkName) { + *self.current_fork.write() = new_fork; + } + + /// Returns the context bytes/fork_digest corresponding to the genesis fork version. + pub fn genesis_context_bytes(&self) -> [u8; 4] { + *self + .fork_to_digest + .get(&ForkName::Base) + .expect("ForkContext must contain genesis context bytes") + } + + /// Returns the fork type given the context bytes/fork_digest. + /// Returns `None` if context bytes doesn't correspond to any valid `ForkName`. + pub fn from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> { + self.digest_to_fork.get(&context) + } + + /// Returns the context bytes/fork_digest corresponding to a fork name. 
diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs
index 4941073b67b..a464a0ed9ad 100644
--- a/consensus/types/src/fork_name.rs
+++ b/consensus/types/src/fork_name.rs
@@ -1,6 +1,12 @@
 use crate::{ChainSpec, Epoch};
+use serde_derive::{Deserialize, Serialize};
+use std::convert::TryFrom;
+use std::fmt::{self, Display, Formatter};
+use std::str::FromStr;
 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[serde(try_from = "String")]
+#[serde(into = "String")]
 pub enum ForkName {
     Base,
     Altair,
@@ -48,7 +54,7 @@ impl ForkName {
     }
 }
 
-impl std::str::FromStr for ForkName {
+impl FromStr for ForkName {
     type Err = ();
 
     fn from_str(fork_name: &str) -> Result<Self, ()> {
@@ -60,6 +66,29 @@ impl std::str::FromStr for ForkName {
     }
 }
 
+impl Display for ForkName {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
+        match self {
+            ForkName::Base => "phase0".fmt(f),
+            ForkName::Altair => "altair".fmt(f),
+        }
+    }
+}
+
+impl From<ForkName> for String {
+    fn from(fork: ForkName) -> String {
+        fork.to_string()
+    }
+}
+
+impl TryFrom<String> for ForkName {
+    type Error = String;
+
+    fn try_from(s: String) -> Result<Self, String> {
+        Self::from_str(&s).map_err(|()| format!("Invalid fork name: {}", s))
+    }
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct InconsistentFork {
     pub fork_at_slot: ForkName,
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 7df65cb269a..403544f007c 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -55,12 +55,15 @@ pub mod signed_beacon_block_header;
 pub mod signed_contribution_and_proof;
 pub mod signed_voluntary_exit;
 pub mod signing_data;
+pub mod sync_committee_subscription;
+pub mod sync_duty;
 pub mod validator;
 pub mod validator_subscription;
 pub mod voluntary_exit;
 #[macro_use]
 pub mod slot_epoch_macros;
 pub mod config_and_preset;
+pub mod fork_context;
 pub mod participation_flags;
 pub mod participation_list;
 pub mod preset;
@@ -107,6 +110,7 @@ pub use crate::enr_fork_id::EnrForkId;
 pub use crate::eth1_data::Eth1Data;
 pub use crate::eth_spec::EthSpecId;
 pub use crate::fork::Fork;
+pub use crate::fork_context::ForkContext;
 pub use crate::fork_data::ForkData;
 pub use crate::fork_name::{ForkName, InconsistentFork};
 pub use crate::free_attestation::FreeAttestation;
@@ -134,8 +138,10 @@ pub use crate::subnet_id::SubnetId;
 pub use crate::sync_aggregate::SyncAggregate;
 pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData;
 pub use crate::sync_committee::SyncCommittee;
-pub use crate::sync_committee_contribution::SyncCommitteeContribution;
+pub use crate::sync_committee_contribution::{SyncCommitteeContribution, SyncContributionData};
 pub use crate::sync_committee_message::SyncCommitteeMessage;
+pub use crate::sync_committee_subscription::SyncCommitteeSubscription;
+pub use crate::sync_duty::SyncDuty;
 pub use crate::sync_selection_proof::SyncSelectionProof;
 pub use crate::sync_subnet_id::SyncSubnetId;
 pub use crate::validator::Validator;
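The string forms are canonical, so the serde and `FromStr` paths agree; a sketch (not in the patch, and assuming `from_str` accepts the same names `Display` emits):

use std::str::FromStr;
use types::ForkName;

fn fork_name_example() {
    assert_eq!(ForkName::Base.to_string(), "phase0");
    assert_eq!(ForkName::from_str("altair"), Ok(ForkName::Altair));
    // serde uses the same conversions via `try_from`/`into`, so an unknown
    // name fails deserialization instead of silently mapping to a default.
    assert!(ForkName::from_str("petersburg").is_err());
}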
diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs
index 1a9e93b8e2d..14c1ffb8cfe 100644
--- a/consensus/types/src/signed_beacon_block.rs
+++ b/consensus/types/src/signed_beacon_block.rs
@@ -71,20 +71,7 @@ impl<E: EthSpec> SignedBeaconBlock<E> {
     /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork
     /// dictated by `self.slot()`.
     pub fn fork_name(&self, spec: &ChainSpec) -> Result<ForkName, InconsistentFork> {
-        let fork_at_slot = spec.fork_name_at_slot::<E>(self.slot());
-        let object_fork = match self {
-            SignedBeaconBlock::Base { .. } => ForkName::Base,
-            SignedBeaconBlock::Altair { .. } => ForkName::Altair,
-        };
-
-        if fork_at_slot == object_fork {
-            Ok(object_fork)
-        } else {
-            Err(InconsistentFork {
-                fork_at_slot,
-                object_fork,
-            })
-        }
+        self.message().fork_name(spec)
     }
 
     /// SSZ decode.
diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs
index a2934090be6..c8fce78a8bb 100644
--- a/consensus/types/src/sync_committee_contribution.rs
+++ b/consensus/types/src/sync_committee_contribution.rs
@@ -77,9 +77,9 @@ impl SignedRoot for Hash256 {}
 /// This is not in the spec, but useful for determining uniqueness of sync committee contributions
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct SyncContributionData {
-    slot: Slot,
-    beacon_block_root: Hash256,
-    subcommittee_index: u64,
+    pub slot: Slot,
+    pub beacon_block_root: Hash256,
+    pub subcommittee_index: u64,
 }
 
 impl SyncContributionData {
diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee_subscription.rs
new file mode 100644
index 00000000000..28732258753
--- /dev/null
+++ b/consensus/types/src/sync_committee_subscription.rs
@@ -0,0 +1,15 @@
+use crate::Epoch;
+use serde::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+
+/// A sync committee subscription created when a validator subscribes to sync committee subnets to perform
+/// sync committee duties.
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
+pub struct SyncCommitteeSubscription {
+    /// The validators index.
+    pub validator_index: u64,
+    /// The sync committee indices.
+    pub sync_committee_indices: Vec<u64>,
+    /// Epoch until which this subscription is required.
+    pub until_epoch: Epoch,
+}
diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs
new file mode 100644
index 00000000000..e3ffe62bfd1
--- /dev/null
+++ b/consensus/types/src/sync_duty.rs
@@ -0,0 +1,83 @@
+use crate::{EthSpec, SyncCommittee, SyncSubnetId};
+use bls::PublicKeyBytes;
+use safe_arith::ArithError;
+use serde_derive::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SyncDuty {
+    pub pubkey: PublicKeyBytes,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub validator_index: u64,
+    #[serde(with = "serde_utils::quoted_u64_vec")]
+    pub validator_sync_committee_indices: Vec<u64>,
+}
+
+impl SyncDuty {
+    /// Create a new `SyncDuty` from the list of validator indices in a sync committee.
+    pub fn from_sync_committee_indices(
+        validator_index: u64,
+        pubkey: PublicKeyBytes,
+        sync_committee_indices: &[usize],
+    ) -> Option<Self> {
+        // Positions of the `validator_index` within the committee.
+        let validator_sync_committee_indices = sync_committee_indices
+            .iter()
+            .enumerate()
+            .filter_map(|(i, &v)| {
+                if validator_index == v as u64 {
+                    Some(i as u64)
+                } else {
+                    None
+                }
+            })
+            .collect();
+        Self::new(validator_index, pubkey, validator_sync_committee_indices)
+    }
+
+    /// Create a new `SyncDuty` from a `SyncCommittee`, which contains the pubkeys but not the
+    /// indices.
+    pub fn from_sync_committee<T: EthSpec>(
+        validator_index: u64,
+        pubkey: PublicKeyBytes,
+        sync_committee: &SyncCommittee<T>,
+    ) -> Option<Self> {
+        let validator_sync_committee_indices = sync_committee
+            .pubkeys
+            .iter()
+            .enumerate()
+            .filter_map(|(i, committee_pubkey)| {
+                if &pubkey == committee_pubkey {
+                    Some(i as u64)
+                } else {
+                    None
+                }
+            })
+            .collect();
+        Self::new(validator_index, pubkey, validator_sync_committee_indices)
+    }
+
+    /// Create a duty if the `validator_sync_committee_indices` is non-empty.
+    fn new(
+        validator_index: u64,
+        pubkey: PublicKeyBytes,
+        validator_sync_committee_indices: Vec<u64>,
+    ) -> Option<Self> {
+        if !validator_sync_committee_indices.is_empty() {
+            Some(SyncDuty {
+                pubkey,
+                validator_index,
+                validator_sync_committee_indices,
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Get the set of subnet IDs for this duty.
+    pub fn subnet_ids<E: EthSpec>(&self) -> Result<HashSet<SyncSubnetId>, ArithError> {
+        SyncSubnetId::compute_subnets_for_sync_committee::<E>(
+            &self.validator_sync_committee_indices,
+        )
+    }
+}
diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs
index fba0b2993ea..b15e49ed93e 100644
--- a/consensus/types/src/sync_subnet_id.rs
+++ b/consensus/types/src/sync_subnet_id.rs
@@ -1,6 +1,11 @@
 //! Identifies each sync committee subnet by an integer identifier.
 use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
+use crate::EthSpec;
+use safe_arith::{ArithError, SafeArith};
 use serde_derive::{Deserialize, Serialize};
+use ssz_types::typenum::Unsigned;
+use std::collections::HashSet;
+use std::fmt::{self, Display};
 use std::ops::{Deref, DerefMut};
 
 lazy_static! {
@@ -33,6 +38,24 @@ impl SyncSubnetId {
     pub fn new(id: u64) -> Self {
         id.into()
     }
+
+    /// Compute required subnets to subscribe to given the sync committee indices.
+    pub fn compute_subnets_for_sync_committee<T: EthSpec>(
+        sync_committee_indices: &[u64],
+    ) -> Result<HashSet<SyncSubnetId>, ArithError> {
+        let subcommittee_size = T::SyncSubcommitteeSize::to_u64();
+
+        sync_committee_indices
+            .iter()
+            .map(|index| index.safe_div(subcommittee_size).map(Self::new))
+            .collect()
+    }
+}
+
+impl Display for SyncSubnetId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "{}", self.0)
+    }
 }
 
 impl Deref for SyncSubnetId {
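Tying the new types together, an illustrative sketch (not in the patch; the indices are made up, and mainnet's 512-member committee splits into 4 subnets of 128):

use types::{Epoch, MainnetEthSpec, SyncCommitteeSubscription, SyncSubnetId};

fn subscription_example() {
    // Committee positions 0 and 300 map to subnets 0 / 128 = 0 and 300 / 128 = 2.
    let indices = vec![0, 300];
    let subnets =
        SyncSubnetId::compute_subnets_for_sync_committee::<MainnetEthSpec>(&indices).unwrap();
    assert!(subnets.contains(&SyncSubnetId::new(0)) && subnets.contains(&SyncSubnetId::new(2)));

    // The matching subscription a VC could post to its beacon node.
    let _subscription = SyncCommitteeSubscription {
        validator_index: 42,
        sync_committee_indices: indices,
        until_epoch: Epoch::new(512),
    };
}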
diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh
index bb75da8d3a9..5c286e31978 100755
--- a/scripts/local_testnet/beacon_node.sh
+++ b/scripts/local_testnet/beacon_node.sh
@@ -20,4 +20,5 @@ exec lighthouse \
     --enr-udp-port $2 \
     --enr-tcp-port $2 \
     --port $2 \
-    --http-port $3
+    --http-port $3 \
+    --target-peers $((NODE_COUNT - 1))
diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh
index a171fb1b085..6cb13b9b93a 100755
--- a/scripts/local_testnet/setup.sh
+++ b/scripts/local_testnet/setup.sh
@@ -22,7 +22,7 @@
 GENESIS_TIME=`expr $NOW + $GENESIS_DELAY`
 
 lcli \
-    --spec mainnet \
+    --spec $SPEC_PRESET \
     new-testnet \
     --deposit-contract-address $DEPOSIT_CONTRACT_ADDRESS \
     --testnet-dir $TESTNET_DIR \
@@ -50,7 +50,7 @@
 echo Validators generated with keystore passwords at $DATADIR.
 echo "Building genesis state...
(this might take a while)" lcli \ - --spec mainnet \ + --spec $SPEC_PRESET \ interop-genesis \ --genesis-time $GENESIS_TIME \ --testnet-dir $TESTNET_DIR \ diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 5c2ed22bd67..293a46d58d9 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -29,6 +29,9 @@ NETWORK_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 +# Spec version (mainnet or minimal) +SPEC_PRESET=mainnet + # Seconds per Eth2 slot SECONDS_PER_SLOT=3 diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 801a0954b03..0e259340212 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -64,6 +64,7 @@ scrypt = { version = "0.5.0", default-features = false } lighthouse_metrics = { path = "../common/lighthouse_metrics" } lazy_static = "1.4.0" fallback = { path = "../common/fallback" } +itertools = "0.10.0" monitoring_api = { path = "../common/monitoring_api" } sensitive_url = { path = "../common/sensitive_url" } task_executor = { path = "../common/task_executor" } diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 50f127db5bf..841a12574be 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -18,7 +18,7 @@ use types::{ }; /// Builds an `AttestationService`. -pub struct AttestationServiceBuilder { +pub struct AttestationServiceBuilder { duties_service: Option>>, validator_store: Option>>, slot_clock: Option, diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 1a667d1cb29..6e5e5a546d0 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -6,6 +6,8 @@ //! The `DutiesService` is also responsible for sending events to the `BlockService` which trigger //! block production. +mod sync; + use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::{ block_service::BlockServiceNotification, @@ -20,6 +22,8 @@ use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use sync::poll_sync_committee_duties; +use sync::SyncDutiesMap; use tokio::{sync::mpsc::Sender, time::sleep}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; @@ -40,6 +44,14 @@ pub enum Error { FailedToDownloadAttesters(String), FailedToProduceSelectionProof(ValidatorStoreError), InvalidModulo(ArithError), + Arith(ArithError), + SyncDutiesNotFound(u64), +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Self::Arith(e) + } } /// Neatly joins the server-generated `AttesterData` with the locally-generated `selection_proof`. @@ -94,6 +106,8 @@ pub struct DutiesService { /// Maps an epoch to all *local* proposers in this epoch. Notably, this does not contain /// proposals for any validators which are not registered locally. pub proposers: RwLock, + /// Map from validator index to sync committee duties. + pub sync_duties: SyncDutiesMap, /// Provides the canonical list of locally-managed validators. pub validator_store: Arc>, /// Tracks the current slot. @@ -302,6 +316,37 @@ pub fn start_update_service( }, "duties_service_attesters", ); + + // Spawn the task which keeps track of local sync committee duties. 
+ let duties_service = core_duties_service.clone(); + let log = core_duties_service.context.log().clone(); + core_duties_service.context.executor.spawn( + async move { + loop { + if let Err(e) = poll_sync_committee_duties(&duties_service).await { + error!( + log, + "Failed to poll sync committee duties"; + "error" => ?e + ); + } + + // Wait until the next slot before polling again. + // This doesn't mean that the beacon node will get polled every slot + // as the sync duties service will return early if it deems it already has + // enough information. + if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() { + sleep(duration).await; + } else { + // Just sleep for one slot if we are unable to read the system clock, this gives + // us an opportunity for the clock to eventually come good. + sleep(duties_service.slot_clock.slot_duration()).await; + continue; + } + } + }, + "duties_service_sync_committee", + ); } /// Iterate through all the voting pubkeys in the `ValidatorStore` and attempt to learn any unknown diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs new file mode 100644 index 00000000000..919b237fd19 --- /dev/null +++ b/validator_client/src/duties_service/sync.rs @@ -0,0 +1,599 @@ +use crate::{ + doppelganger_service::DoppelgangerStatus, + duties_service::{DutiesService, Error}, +}; +use itertools::Itertools; +use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use slog::{crit, debug, info, warn}; +use slot_clock::SlotClock; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use types::{ + ChainSpec, Epoch, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId, +}; + +/// Number of epochs in advance to compute selection proofs. +pub const AGGREGATION_PRE_COMPUTE_EPOCHS: u64 = 2; + +/// Top-level data-structure containing sync duty information. +/// +/// This data is structured as a series of nested `HashMap`s wrapped in `RwLock`s. Fine-grained +/// locking is used to provide maximum concurrency for the different services reading and writing. +/// +/// Deadlocks are prevented by: +/// +/// 1. Hierarchical locking. It is impossible to lock an inner lock (e.g. `validators`) without +/// first locking its parent. +/// 2. One-at-a-time locking. For the innermost locks on the aggregator duties, all of the functions +/// in this file take care to only lock one validator at a time. We never hold a lock while +/// trying to obtain another one (hence no lock ordering issues). +pub struct SyncDutiesMap { + /// Map from sync committee period to duties for members of that sync committee. + committees: RwLock>, +} + +/// Duties for a single sync committee period. +#[derive(Default)] +pub struct CommitteeDuties { + /// Map from validator index to validator duties. + /// + /// A `None` value indicates that the validator index is known *not* to be a member of the sync + /// committee, while a `Some` indicates a known member. An absent value indicates that the + /// validator index was not part of the set of local validators when the duties were fetched. + /// This allows us to track changes to the set of local validators. + validators: RwLock>>, +} + +/// Duties for a single validator. +pub struct ValidatorDuties { + /// The sync duty: including validator sync committee indices & pubkey. + duty: SyncDuty, + /// The aggregator duties: cached selection proofs for upcoming epochs. 
+ aggregation_duties: AggregatorDuties, +} + +/// Aggregator duties for a single validator. +pub struct AggregatorDuties { + /// The epoch up to which aggregation proofs have already been computed (inclusive). + pre_compute_epoch: RwLock>, + /// Map from slot & subnet ID to proof that this validator is an aggregator. + /// + /// The slot is the slot at which the signed contribution and proof should be broadcast, + /// which is 1 less than the slot for which the `duty` was computed. + proofs: RwLock>, +} + +/// Duties for multiple validators, for a single slot. +/// +/// This type is returned to the sync service. +pub struct SlotDuties { + /// List of duties for all sync committee members at this slot. + /// + /// Note: this is intentionally NOT split by subnet so that we only sign + /// one `SyncCommitteeMessage` per validator (recall a validator may be part of multiple + /// subnets). + pub duties: Vec, + /// Map from subnet ID to validator index and selection proof of each aggregator. + pub aggregators: HashMap>, +} + +impl Default for SyncDutiesMap { + fn default() -> Self { + Self { + committees: RwLock::new(HashMap::new()), + } + } +} + +impl SyncDutiesMap { + /// Check if duties are already known for all of the given validators for `committee_period`. + pub fn all_duties_known(&self, committee_period: u64, validator_indices: &[u64]) -> bool { + self.committees + .read() + .get(&committee_period) + .map_or(false, |committee_duties| { + let validator_duties = committee_duties.validators.read(); + validator_indices + .iter() + .all(|index| validator_duties.contains_key(index)) + }) + } + + /// Prepare for pre-computation of selection proofs for `committee_period`. + /// + /// Return the epoch up to which proofs should be pre-computed, as well as a vec of + /// `(previous_pre_compute_epoch, sync_duty)` pairs for all validators which need to have proofs + /// computed. See `fill_in_aggregation_proofs` for the actual calculation. + pub fn prepare_for_aggregator_pre_compute( + &self, + committee_period: u64, + current_epoch: Epoch, + spec: &ChainSpec, + ) -> (Epoch, Vec<(Epoch, SyncDuty)>) { + let default_start_epoch = + std::cmp::max(current_epoch, first_epoch_of_period(committee_period, spec)); + let pre_compute_epoch = std::cmp::min( + current_epoch + AGGREGATION_PRE_COMPUTE_EPOCHS, + last_epoch_of_period(committee_period, spec), + ); + + let pre_compute_duties = self.committees.read().get(&committee_period).map_or_else( + Vec::new, + |committee_duties| { + let validator_duties = committee_duties.validators.read(); + validator_duties + .values() + .filter_map(|maybe_duty| { + let duty = maybe_duty.as_ref()?; + let old_pre_compute_epoch = duty + .aggregation_duties + .pre_compute_epoch + .write() + .replace(pre_compute_epoch); + + match old_pre_compute_epoch { + // No proofs pre-computed previously, compute all from the start of + // the period or the current epoch (whichever is later). + None => Some((default_start_epoch, duty.duty.clone())), + // Proofs computed up to `prev`, start from the subsequent epoch. + Some(prev) if prev < pre_compute_epoch => { + Some((prev + 1, duty.duty.clone())) + } + // Proofs already known, no need to compute. 
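+                            // (Worked example, assuming mainnet's 256-epoch periods: at
+                            // current_epoch 1000 the period's last epoch is 1023, so
+                            // pre_compute_epoch = min(1000 + 2, 1023) = 1002; a validator
+                            // whose proofs already reach 1002 takes this arm and does nothing.)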
+ _ => None, + } + }) + .collect() + }, + ); + (pre_compute_epoch, pre_compute_duties) + } + + pub fn get_or_create_committee_duties<'a, 'b>( + &'a self, + committee_period: u64, + validator_indices: impl IntoIterator, + ) -> MappedRwLockReadGuard<'a, CommitteeDuties> { + let mut committees_writer = self.committees.write(); + + committees_writer + .entry(committee_period) + .or_insert_with(CommitteeDuties::default) + .init(validator_indices); + + // Return shared reference + RwLockReadGuard::map( + RwLockWriteGuard::downgrade(committees_writer), + |committees_reader| &committees_reader[&committee_period], + ) + } + + /// Get duties for all validators for the given `wall_clock_slot`. + /// + /// This is the entry-point for the sync committee service. + pub fn get_duties_for_slot( + &self, + wall_clock_slot: Slot, + spec: &ChainSpec, + ) -> Option { + // Sync duties lag their assigned slot by 1 + let duty_slot = wall_clock_slot + 1; + + let sync_committee_period = duty_slot + .epoch(E::slots_per_epoch()) + .sync_committee_period(spec) + .ok()?; + + let committees_reader = self.committees.read(); + let committee_duties = committees_reader.get(&sync_committee_period)?; + + let mut duties = vec![]; + let mut aggregators = HashMap::new(); + + committee_duties + .validators + .read() + .values() + // Filter out non-members & failed subnet IDs. + .filter_map(|opt_duties| { + let duty = opt_duties.as_ref()?; + let subnet_ids = duty.duty.subnet_ids::().ok()?; + Some((duty, subnet_ids)) + }) + // Add duties for members to the vec of all duties, and aggregators to the + // aggregators map. + .for_each(|(validator_duty, subnet_ids)| { + duties.push(validator_duty.duty.clone()); + + let proofs = validator_duty.aggregation_duties.proofs.read(); + + for subnet_id in subnet_ids { + if let Some(proof) = proofs.get(&(wall_clock_slot, subnet_id)) { + aggregators.entry(subnet_id).or_insert_with(Vec::new).push(( + validator_duty.duty.validator_index, + validator_duty.duty.pubkey, + proof.clone(), + )); + } + } + }); + + Some(SlotDuties { + duties, + aggregators, + }) + } + + /// Prune duties for past sync committee periods from the map. + pub fn prune(&self, current_sync_committee_period: u64) { + self.committees + .write() + .retain(|period, _| *period >= current_sync_committee_period) + } +} + +impl CommitteeDuties { + fn init<'b>(&mut self, validator_indices: impl IntoIterator) { + validator_indices.into_iter().for_each(|validator_index| { + self.validators + .get_mut() + .entry(*validator_index) + .or_insert(None); + }) + } +} + +impl ValidatorDuties { + fn new(duty: SyncDuty) -> Self { + Self { + duty, + aggregation_duties: AggregatorDuties { + pre_compute_epoch: RwLock::new(None), + proofs: RwLock::new(HashMap::new()), + }, + } + } +} + +/// Number of epochs to wait from the start of the period before actually fetching duties. +fn epoch_offset(spec: &ChainSpec) -> u64 { + spec.epochs_per_sync_committee_period.as_u64() / 2 +} + +fn first_epoch_of_period(sync_committee_period: u64, spec: &ChainSpec) -> Epoch { + spec.epochs_per_sync_committee_period * sync_committee_period +} + +fn last_epoch_of_period(sync_committee_period: u64, spec: &ChainSpec) -> Epoch { + first_epoch_of_period(sync_committee_period + 1, spec) - 1 +} + +pub async fn poll_sync_committee_duties( + duties_service: &Arc>, +) -> Result<(), Error> { + let sync_duties = &duties_service.sync_duties; + let spec = &duties_service.spec; + let current_epoch = duties_service + .slot_clock + .now() + .ok_or(Error::UnableToReadSlotClock)? 
+ .epoch(E::slots_per_epoch()); + + // If the Altair fork is yet to be activated, do not attempt to poll for duties. + if spec + .altair_fork_epoch + .map_or(true, |altair_epoch| current_epoch < altair_epoch) + { + return Ok(()); + } + + let current_sync_committee_period = current_epoch.sync_committee_period(spec)?; + let next_sync_committee_period = current_sync_committee_period + 1; + + // Collect *all* pubkeys, even those undergoing doppelganger protection. + // + // Sync committee messages are not slashable and are currently excluded from doppelganger + // protection. + let local_pubkeys: HashSet<_> = duties_service + .validator_store + .voting_pubkeys(DoppelgangerStatus::ignored); + + let local_indices = { + let mut local_indices = Vec::with_capacity(local_pubkeys.len()); + + let vals_ref = duties_service.validator_store.initialized_validators(); + let vals = vals_ref.read(); + for &pubkey in &local_pubkeys { + if let Some(validator_index) = vals.get_index(&pubkey) { + local_indices.push(validator_index) + } + } + local_indices + }; + + // If duties aren't known for the current period, poll for them. + if !sync_duties.all_duties_known(current_sync_committee_period, &local_indices) { + poll_sync_committee_duties_for_period( + duties_service, + &local_indices, + current_sync_committee_period, + ) + .await?; + + // Prune previous duties (we avoid doing this too often as it locks the whole map). + sync_duties.prune(current_sync_committee_period); + } + + // Pre-compute aggregator selection proofs for the current period. + let (current_pre_compute_epoch, new_pre_compute_duties) = sync_duties + .prepare_for_aggregator_pre_compute(current_sync_committee_period, current_epoch, spec); + + if !new_pre_compute_duties.is_empty() { + let sub_duties_service = duties_service.clone(); + duties_service.context.executor.spawn_blocking( + move || { + fill_in_aggregation_proofs( + sub_duties_service, + &new_pre_compute_duties, + current_sync_committee_period, + current_epoch, + current_pre_compute_epoch, + ) + }, + "duties_service_sync_selection_proofs", + ); + } + + // If we're past the point in the current period where we should determine duties for the next + // period and they are not yet known, then poll. + if current_epoch.as_u64() % spec.epochs_per_sync_committee_period.as_u64() >= epoch_offset(spec) + && !sync_duties.all_duties_known(next_sync_committee_period, &local_indices) + { + poll_sync_committee_duties_for_period( + duties_service, + &local_indices, + next_sync_committee_period, + ) + .await?; + + // Prune (this is the main code path for updating duties, so we should almost always hit + // this prune). + sync_duties.prune(current_sync_committee_period); + } + + // Pre-compute aggregator selection proofs for the next period. + if (current_epoch + AGGREGATION_PRE_COMPUTE_EPOCHS).sync_committee_period(spec)? 
+ == next_sync_committee_period + { + let (pre_compute_epoch, new_pre_compute_duties) = sync_duties + .prepare_for_aggregator_pre_compute(next_sync_committee_period, current_epoch, spec); + + if !new_pre_compute_duties.is_empty() { + let sub_duties_service = duties_service.clone(); + duties_service.context.executor.spawn_blocking( + move || { + fill_in_aggregation_proofs( + sub_duties_service, + &new_pre_compute_duties, + next_sync_committee_period, + current_epoch, + pre_compute_epoch, + ) + }, + "duties_service_sync_selection_proofs", + ); + } + } + + Ok(()) +} + +pub async fn poll_sync_committee_duties_for_period( + duties_service: &Arc>, + local_indices: &[u64], + sync_committee_period: u64, +) -> Result<(), Error> { + let spec = &duties_service.spec; + let log = duties_service.context.log(); + + debug!( + log, + "Fetching sync committee duties"; + "sync_committee_period" => sync_committee_period, + "num_validators" => local_indices.len(), + ); + + let period_start_epoch = spec.epochs_per_sync_committee_period * sync_committee_period; + + let duties_response = duties_service + .beacon_nodes + .first_success(duties_service.require_synced, |beacon_node| async move { + beacon_node + .post_validator_duties_sync(period_start_epoch, local_indices) + .await + }) + .await; + + let duties = match duties_response { + Ok(res) => res.data, + Err(e) => { + warn!( + log, + "Failed to download sync committee duties"; + "sync_committee_period" => sync_committee_period, + "error" => %e, + ); + return Ok(()); + } + }; + + debug!(log, "Fetched sync duties from BN"; "count" => duties.len()); + + // Add duties to map. + let committee_duties = duties_service + .sync_duties + .get_or_create_committee_duties(sync_committee_period, local_indices); + + let mut validator_writer = committee_duties.validators.write(); + for duty in duties { + let validator_duties = validator_writer + .get_mut(&duty.validator_index) + .ok_or(Error::SyncDutiesNotFound(duty.validator_index))?; + + let updated = validator_duties.as_ref().map_or(true, |existing_duties| { + let updated_due_to_reorg = existing_duties.duty.validator_sync_committee_indices + != duty.validator_sync_committee_indices; + if updated_due_to_reorg { + warn!( + log, + "Sync committee duties changed"; + "message" => "this could be due to a really long re-org, or a bug" + ); + } + updated_due_to_reorg + }); + + if updated { + info!( + log, + "Validator in sync committee"; + "validator_index" => duty.validator_index, + "sync_committee_period" => sync_committee_period, + ); + + *validator_duties = Some(ValidatorDuties::new(duty)); + } + } + + Ok(()) +} + +pub fn fill_in_aggregation_proofs( + duties_service: Arc>, + pre_compute_duties: &[(Epoch, SyncDuty)], + sync_committee_period: u64, + current_epoch: Epoch, + pre_compute_epoch: Epoch, +) { + let log = duties_service.context.log(); + + debug!( + log, + "Calculating sync selection proofs"; + "period" => sync_committee_period, + "current_epoch" => current_epoch, + "pre_compute_epoch" => pre_compute_epoch + ); + + // Generate selection proofs for each validator at each slot, one epoch at a time. + for epoch in (current_epoch.as_u64()..=pre_compute_epoch.as_u64()).map(Epoch::new) { + // Generate proofs. + let validator_proofs: Vec<(u64, Vec<_>)> = pre_compute_duties + .iter() + .filter_map(|(validator_start_epoch, duty)| { + // Proofs are already known at this epoch for this validator. 
+ if epoch < *validator_start_epoch { + return None; + } + + let subnet_ids = duty + .subnet_ids::() + .map_err(|e| { + crit!( + log, + "Arithmetic error computing subnet IDs"; + "error" => ?e, + ); + }) + .ok()?; + + let proofs = epoch + .slot_iter(E::slots_per_epoch()) + .cartesian_product(&subnet_ids) + .filter_map(|(duty_slot, &subnet_id)| { + // Construct proof for prior slot. + let slot = duty_slot - 1; + + let proof = duties_service + .validator_store + .produce_sync_selection_proof(&duty.pubkey, slot, subnet_id) + .map_err(|_| { + warn!( + log, + "Pubkey missing when signing selection proof"; + "pubkey" => ?duty.pubkey, + "slot" => slot, + ); + }) + .ok()?; + + let is_aggregator = proof + .is_aggregator::() + .map_err(|e| { + warn!( + log, + "Error determining is_aggregator"; + "pubkey" => ?duty.pubkey, + "slot" => slot, + "error" => ?e, + ); + }) + .ok()?; + + if is_aggregator { + debug!( + log, + "Validator is sync aggregator"; + "validator_index" => duty.validator_index, + "slot" => slot, + "subnet_id" => %subnet_id, + ); + Some(((slot, subnet_id), proof)) + } else { + None + } + }) + .collect(); + + Some((duty.validator_index, proofs)) + }) + .collect(); + + // Add to global storage (we add regularly so the proofs can be used ASAP). + let sync_map = duties_service.sync_duties.committees.read(); + let committee_duties = if let Some(duties) = sync_map.get(&sync_committee_period) { + duties + } else { + debug!( + log, + "Missing sync duties"; + "period" => sync_committee_period, + ); + continue; + }; + let validators = committee_duties.validators.read(); + let num_validators_updated = validator_proofs.len(); + + for (validator_index, proofs) in validator_proofs { + if let Some(Some(duty)) = validators.get(&validator_index) { + duty.aggregation_duties.proofs.write().extend(proofs); + } else { + debug!( + log, + "Missing sync duty to update"; + "validator_index" => validator_index, + "period" => sync_committee_period, + ); + } + } + + if num_validators_updated > 0 { + debug!( + log, + "Finished computing sync selection proofs"; + "epoch" => epoch, + "updated_validators" => num_validators_updated, + ); + } + } +} diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs deleted file mode 100644 index f5d39e397c9..00000000000 --- a/validator_client/src/fork_service.rs +++ /dev/null @@ -1,222 +0,0 @@ -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::http_metrics::metrics; -use environment::RuntimeContext; -use eth2::types::StateId; -use parking_lot::RwLock; -use slog::{debug, trace}; -use slog::{error, Logger}; -use slot_clock::SlotClock; -use std::ops::Deref; -use std::sync::Arc; -use tokio::time::{sleep, Duration}; -use types::{EthSpec, Fork}; - -/// Delay this period of time after the slot starts. This allows the node to process the new slot. -const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80); - -/// Builds a `ForkService`. 
-pub struct ForkServiceBuilder { - fork: Option, - slot_clock: Option, - beacon_nodes: Option>>, - log: Option, -} - -impl ForkServiceBuilder { - pub fn new() -> Self { - Self { - fork: None, - slot_clock: None, - beacon_nodes: None, - log: None, - } - } - - pub fn fork(mut self, fork: Fork) -> Self { - self.fork = Some(fork); - self - } - - pub fn slot_clock(mut self, slot_clock: T) -> Self { - self.slot_clock = Some(slot_clock); - self - } - - pub fn beacon_nodes(mut self, beacon_nodes: Arc>) -> Self { - self.beacon_nodes = Some(beacon_nodes); - self - } - - pub fn log(mut self, log: Logger) -> Self { - self.log = Some(log); - self - } - - pub fn build(self) -> Result, String> { - Ok(ForkService { - inner: Arc::new(Inner { - fork: RwLock::new(self.fork.ok_or("Cannot build ForkService without fork")?), - slot_clock: self - .slot_clock - .ok_or("Cannot build ForkService without slot_clock")?, - beacon_nodes: self - .beacon_nodes - .ok_or("Cannot build ForkService without beacon_node")?, - log: self - .log - .ok_or("Cannot build ForkService without logger")? - .clone(), - }), - }) - } -} - -#[cfg(test)] -#[allow(dead_code)] -impl ForkServiceBuilder { - pub fn testing_only(spec: types::ChainSpec, log: Logger) -> Self { - use crate::beacon_node_fallback::CandidateBeaconNode; - - let slot_clock = slot_clock::TestingSlotClock::new( - types::Slot::new(0), - std::time::Duration::from_secs(42), - std::time::Duration::from_secs(42), - ); - let candidates = vec![CandidateBeaconNode::new(eth2::BeaconNodeHttpClient::new( - sensitive_url::SensitiveUrl::parse("http://127.0.0.1").unwrap(), - eth2::Timeouts::set_all(Duration::from_secs(12)), - ))]; - let mut beacon_nodes = BeaconNodeFallback::new(candidates, spec, log.clone()); - beacon_nodes.set_slot_clock(slot_clock); - - Self { - fork: Some(types::Fork::default()), - slot_clock: Some(slot_clock::TestingSlotClock::new( - types::Slot::new(0), - std::time::Duration::from_secs(42), - std::time::Duration::from_secs(42), - )), - beacon_nodes: Some(Arc::new(beacon_nodes)), - log: Some(log), - } - } -} - -/// Helper to minimise `Arc` usage. -pub struct Inner { - fork: RwLock, - beacon_nodes: Arc>, - log: Logger, - slot_clock: T, -} - -/// Attempts to download the `Fork` struct from the beacon node at the start of each epoch. -pub struct ForkService { - inner: Arc>, -} - -impl Clone for ForkService { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -impl Deref for ForkService { - type Target = Inner; - - fn deref(&self) -> &Self::Target { - self.inner.deref() - } -} - -impl ForkService { - /// Returns the last fork downloaded from the beacon node, if any. - pub fn fork(&self) -> Fork { - *self.fork.read() - } - - /// Returns the slot clock. - pub fn slot_clock(&self) -> T { - self.slot_clock.clone() - } - - /// Starts the service that periodically polls for the `Fork`. - pub fn start_update_service(self, context: &RuntimeContext) -> Result<(), String> { - // Run an immediate update before starting the updater service. - context - .executor - .spawn_ignoring_error(self.clone().do_update(), "fork service update"); - - let executor = context.executor.clone(); - let log = context.log().clone(); - let spec = E::default_spec(); - - let interval_fut = async move { - loop { - // Run this poll before the wait, this should hopefully download the fork before the - // other services need them. 
- self.clone().do_update().await.ok(); - - if let Some(duration_to_next_epoch) = - self.slot_clock.duration_to_next_epoch(E::slots_per_epoch()) - { - sleep(duration_to_next_epoch + TIME_DELAY_FROM_SLOT).await; - } else { - error!(log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. - sleep(Duration::from_secs(spec.seconds_per_slot)).await; - continue; - } - } - }; - - executor.spawn(interval_fut, "fork_service"); - - Ok(()) - } - - /// Attempts to download the `Fork` from the server. - async fn do_update(self) -> Result<(), ()> { - let _timer = - metrics::start_timer_vec(&metrics::FORK_SERVICE_TIMES, &[metrics::FULL_UPDATE]); - - let log = &self.log; - let fork = self - .inner - .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .get_beacon_states_fork(StateId::Head) - .await - .map_err(|e| { - trace!( - log, - "Fork update failed"; - "error" => format!("Error retrieving fork: {:?}", e) - ) - })? - .ok_or_else(|| { - trace!( - log, - "Fork update failed"; - "error" => "The beacon head fork is unknown" - ) - }) - .map(|result| result.data) - }) - .await - .map_err(|_| ())?; - - if *(self.fork.read()) != fork { - *(self.fork.write()) = fork; - } - - debug!(self.log, "Fork update success"); - - // Returning an error will stop the interval. This is not desired, a single failure - // should not stop all future attempts. - Ok(()) - } -} diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index fd8b2b9e730..a3cda8c6601 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -4,7 +4,7 @@ use crate::doppelganger_service::DoppelgangerService; use crate::{ http_api::{ApiSecret, Config as HttpConfig, Context}, - Config, ForkServiceBuilder, InitializedValidators, ValidatorDefinitions, ValidatorStore, + Config, InitializedValidators, ValidatorDefinitions, ValidatorStore, }; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, @@ -17,10 +17,11 @@ use eth2_keystore::KeystoreBuilder; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; -use slot_clock::TestingSlotClock; +use slot_clock::{SlotClock, TestingSlotClock}; use std::marker::PhantomData; use std::net::Ipv4Addr; use std::sync::Arc; +use std::time::Duration; use tempfile::{tempdir, TempDir}; use tokio::runtime::Runtime; use tokio::sync::oneshot; @@ -73,20 +74,19 @@ impl ApiTester { let spec = E::default_spec(); - let fork_service = ForkServiceBuilder::testing_only(spec.clone(), log.clone()) - .build() - .unwrap(); - let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); - let validator_store: ValidatorStore = ValidatorStore::new( + let slot_clock = + TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + + let validator_store = ValidatorStore::<_, E>::new( initialized_validators, slashing_protection, Hash256::repeat_byte(42), spec, - fork_service.clone(), Some(Arc::new(DoppelgangerService::new(log.clone()))), + slot_clock, log.clone(), ); @@ -96,7 +96,7 @@ impl ApiTester { let initialized_validators = validator_store.initialized_validators(); - let context: Arc> = Arc::new(Context { + let context = Arc::new(Context { runtime, api_secret, validator_dir: Some(validator_dir.path().into()), diff --git 
a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 120e6c2c7e6..db8eb821241 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -67,16 +67,26 @@ lazy_static::lazy_static! { "Total count of attempted SelectionProof signings", &["status"] ); + pub static ref SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_sync_committee_messages_total", + "Total count of attempted SyncCommitteeMessage signings", + &["status"] + ); + pub static ref SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_sync_committee_contributions_total", + "Total count of attempted ContributionAndProof signings", + &["status"] + ); + pub static ref SIGNED_SYNC_SELECTION_PROOFS_TOTAL: Result = try_create_int_counter_vec( + "vc_signed_sync_selection_proofs_total", + "Total count of attempted SyncSelectionProof signings", + &["status"] + ); pub static ref DUTIES_SERVICE_TIMES: Result = try_create_histogram_vec( "vc_duties_service_task_times_seconds", "Duration to perform duties service tasks", &["task"] ); - pub static ref FORK_SERVICE_TIMES: Result = try_create_histogram_vec( - "vc_fork_service_task_times_seconds", - "Duration to perform fork service tasks", - &["task"] - ); pub static ref ATTESTATION_SERVICE_TIMES: Result = try_create_histogram_vec( "vc_attestation_service_task_times_seconds", "Duration to perform attestation service tasks", diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index be9a27db7b9..bfdc183d436 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -5,12 +5,12 @@ mod check_synced; mod cli; mod config; mod duties_service; -mod fork_service; mod graffiti_file; mod http_metrics; mod initialized_validators; mod key_cache; mod notifier; +mod sync_committee_service; mod validator_store; mod doppelganger_service; @@ -31,9 +31,7 @@ use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; use duties_service::DutiesService; use environment::RuntimeContext; -use eth2::types::StateId; use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts}; -use fork_service::{ForkService, ForkServiceBuilder}; use http_api::ApiSecret; use initialized_validators::InitializedValidators; use notifier::spawn_notifier; @@ -46,11 +44,12 @@ use std::marker::PhantomData; use std::net::SocketAddr; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; +use sync_committee_service::SyncCommitteeService; use tokio::{ sync::mpsc, time::{sleep, Duration}, }; -use types::{EthSpec, Fork, Hash256}; +use types::{EthSpec, Hash256}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. 
@@ -66,6 +65,7 @@ const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -73,9 +73,9 @@ const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; pub struct ProductionValidatorClient { context: RuntimeContext, duties_service: Arc>, - fork_service: ForkService, block_service: BlockService, attestation_service: AttestationService, + sync_committee_service: SyncCommitteeService, doppelganger_service: Option>, validator_store: Arc>, http_api_listen_addr: Option, @@ -263,6 +263,7 @@ impl ProductionValidatorClient { liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, } } else { Timeouts::set_all(slot_duration) @@ -293,7 +294,7 @@ impl ProductionValidatorClient { BeaconNodeFallback::new(candidates, context.eth2_config.spec.clone(), log.clone()); // Perform some potentially long-running initialization tasks. - let (genesis_time, genesis_validators_root, fork) = tokio::select! { + let (genesis_time, genesis_validators_root) = tokio::select! { tuple = init_from_beacon_node(&beacon_nodes, &context) => tuple?, () = context.executor.exit() => return Err("Shutting down".to_string()) }; @@ -313,13 +314,6 @@ impl ProductionValidatorClient { let beacon_nodes = Arc::new(beacon_nodes); start_fallback_updater_service(context.clone(), beacon_nodes.clone())?; - let fork_service = ForkServiceBuilder::new() - .fork(fork) - .slot_clock(slot_clock.clone()) - .beacon_nodes(beacon_nodes.clone()) - .log(log.clone()) - .build()?; - let doppelganger_service = if config.enable_doppelganger_protection { Some(Arc::new(DoppelgangerService::new( context @@ -331,16 +325,15 @@ impl ProductionValidatorClient { None }; - let validator_store: Arc> = - Arc::new(ValidatorStore::new( - validators, - slashing_protection, - genesis_validators_root, - context.eth2_config.spec.clone(), - fork_service.clone(), - doppelganger_service.clone(), - log.clone(), - )); + let validator_store = Arc::new(ValidatorStore::new( + validators, + slashing_protection, + genesis_validators_root, + context.eth2_config.spec.clone(), + doppelganger_service.clone(), + slot_clock.clone(), + log.clone(), + )); info!( log, @@ -359,6 +352,7 @@ impl ProductionValidatorClient { let duties_service = Arc::new(DutiesService { attesters: <_>::default(), proposers: <_>::default(), + sync_duties: <_>::default(), slot_clock: slot_clock.clone(), beacon_nodes: beacon_nodes.clone(), validator_store: validator_store.clone(), @@ -394,6 +388,14 @@ impl ProductionValidatorClient { .runtime_context(context.service_context("attestation".into())) .build()?; + let sync_committee_service = SyncCommitteeService::new( + duties_service.clone(), + validator_store.clone(), + slot_clock, + beacon_nodes.clone(), + context.service_context("sync_committee".into()), + ); + // Wait until genesis has occured. 
// // It seems most sensible to move this into the `start_service` function, but I'm caution @@ -406,9 +408,9 @@ impl ProductionValidatorClient { Ok(Self { context, duties_service, - fork_service, block_service, attestation_service, + sync_committee_service, doppelganger_service, validator_store, config, @@ -427,11 +429,6 @@ impl ProductionValidatorClient { duties_service::start_update_service(self.duties_service.clone(), block_service_tx); - self.fork_service - .clone() - .start_update_service(&self.context) - .map_err(|e| format!("Unable to start fork service: {}", e))?; - self.block_service .clone() .start_update_service(block_service_rx) @@ -442,6 +439,11 @@ impl ProductionValidatorClient { .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start attestation service: {}", e))?; + self.sync_committee_service + .clone() + .start_update_service(&self.context.eth2_config.spec) + .map_err(|e| format!("Unable to start sync committee service: {}", e))?; + if let Some(doppelganger_service) = self.doppelganger_service.clone() { DoppelgangerService::start_update_service( doppelganger_service, @@ -461,7 +463,7 @@ impl ProductionValidatorClient { let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; self.http_api_listen_addr = if self.config.http_api.enabled { - let ctx: Arc> = Arc::new(http_api::Context { + let ctx = Arc::new(http_api::Context { runtime: self.context.executor.runtime(), api_secret, validator_store: Some(self.validator_store.clone()), @@ -495,7 +497,7 @@ impl ProductionValidatorClient { async fn init_from_beacon_node( beacon_nodes: &BeaconNodeFallback, context: &RuntimeContext, -) -> Result<(u64, Hash256, Fork), String> { +) -> Result<(u64, Hash256), String> { loop { beacon_nodes.update_unready_candidates().await; let num_available = beacon_nodes.num_available().await; @@ -554,33 +556,7 @@ async fn init_from_beacon_node( sleep(RETRY_DELAY).await; }; - let fork = loop { - match beacon_nodes - .first_success(RequireSynced::No, |node| async move { - node.get_beacon_states_fork(StateId::Head).await - }) - .await - { - Ok(Some(fork)) => break fork.data, - Ok(None) => { - info!( - context.log(), - "Failed to get fork, state not found"; - ); - } - Err(errors) => { - error!( - context.log(), - "Failed to get fork"; - "error" => %errors - ); - } - } - - sleep(RETRY_DELAY).await; - }; - - Ok((genesis.genesis_time, genesis.genesis_validators_root, fork)) + Ok((genesis.genesis_time, genesis.genesis_validators_root)) } async fn wait_for_genesis( diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs new file mode 100644 index 00000000000..33110ed2e82 --- /dev/null +++ b/validator_client/src/sync_committee_service.rs @@ -0,0 +1,537 @@ +use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::{duties_service::DutiesService, validator_store::ValidatorStore}; +use environment::RuntimeContext; +use eth2::types::BlockId; +use futures::future::FutureExt; +use slog::{crit, debug, error, info, trace, warn}; +use slot_clock::SlotClock; +use std::collections::HashMap; +use std::ops::Deref; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use tokio::time::{sleep, sleep_until, Duration, Instant}; +use types::{ + ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, + SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, +}; + +pub const SUBSCRIPTION_LOOKAHEAD_EPOCHS: u64 = 4; + +pub struct SyncCommitteeService { + 
inner: Arc>, +} + +impl Clone for SyncCommitteeService { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl Deref for SyncCommitteeService { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +pub struct Inner { + duties_service: Arc>, + validator_store: Arc>, + slot_clock: T, + beacon_nodes: Arc>, + context: RuntimeContext, + /// Boolean to track whether the service has posted subscriptions to the BN at least once. + /// + /// This acts as a latch that fires once upon start-up, and then never again. + first_subscription_done: AtomicBool, +} + +impl SyncCommitteeService { + pub fn new( + duties_service: Arc>, + validator_store: Arc>, + slot_clock: T, + beacon_nodes: Arc>, + context: RuntimeContext, + ) -> Self { + Self { + inner: Arc::new(Inner { + duties_service, + validator_store, + slot_clock, + beacon_nodes, + context, + first_subscription_done: AtomicBool::new(false), + }), + } + } + + /// Check if the Altair fork has been activated and therefore sync duties should be performed. + /// + /// Slot clock errors are mapped to `false`. + fn altair_fork_activated(&self) -> bool { + self.duties_service + .spec + .altair_fork_epoch + .and_then(|fork_epoch| { + let current_epoch = self.slot_clock.now()?.epoch(E::slots_per_epoch()); + Some(current_epoch >= fork_epoch) + }) + .unwrap_or(false) + } + + pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + let log = self.context.log().clone(); + let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let duration_to_next_slot = self + .slot_clock + .duration_to_next_slot() + .ok_or("Unable to determine duration to next slot")?; + + info!( + log, + "Sync committee service started"; + "next_update_millis" => duration_to_next_slot.as_millis() + ); + + let executor = self.context.executor.clone(); + + let interval_fut = async move { + loop { + if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { + // Wait for contribution broadcast interval 1/3 of the way through the slot. + let log = self.context.log(); + sleep(duration_to_next_slot + slot_duration / 3).await; + + // Do nothing if the Altair fork has not yet occurred. + if !self.altair_fork_activated() { + continue; + } + + if let Err(e) = self.spawn_contribution_tasks(slot_duration).await { + crit!( + log, + "Failed to spawn sync contribution tasks"; + "error" => e + ) + } else { + trace!( + log, + "Spawned sync contribution tasks"; + ) + } + + // Do subscriptions for future slots/epochs. + self.spawn_subscription_tasks(); + } else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + } + } + }; + + executor.spawn(interval_fut, "sync_committee_service"); + Ok(()) + } + + async fn spawn_contribution_tasks(&self, slot_duration: Duration) -> Result<(), String> { + let log = self.context.log().clone(); + let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; + let duration_to_next_slot = self + .slot_clock + .duration_to_next_slot() + .ok_or("Unable to determine duration to next slot")?; + + // If a validator needs to publish a sync aggregate, they must do so at 2/3 + // through the slot. 
This delay triggers at this time + let aggregate_production_instant = Instant::now() + + duration_to_next_slot + .checked_sub(slot_duration / 3) + .unwrap_or_else(|| Duration::from_secs(0)); + + let slot_duties = self + .duties_service + .sync_duties + .get_duties_for_slot::(slot, &self.duties_service.spec) + .ok_or_else(|| format!("Error fetching duties for slot {}", slot))?; + + if slot_duties.duties.is_empty() { + debug!( + log, + "No local validators in current sync committee"; + "slot" => slot, + ); + return Ok(()); + } + + // Fetch block root for `SyncCommitteeContribution`. + let block_root = self + .beacon_nodes + .first_success(RequireSynced::Yes, |beacon_node| async move { + beacon_node.get_beacon_blocks_root(BlockId::Head).await + }) + .await + .map_err(|e| e.to_string())? + .ok_or_else(|| format!("No block root found for slot {}", slot))? + .data + .root; + + // Spawn one task to publish all of the sync committee signatures. + let validator_duties = slot_duties.duties; + self.inner.context.executor.spawn( + self.clone() + .publish_sync_committee_signatures(slot, block_root, validator_duties) + .map(|_| ()), + "sync_committee_signature_publish", + ); + + let aggregators = slot_duties.aggregators; + self.inner.context.executor.spawn( + self.clone() + .publish_sync_committee_aggregates( + slot, + block_root, + aggregators, + aggregate_production_instant, + ) + .map(|_| ()), + "sync_committee_aggregate_publish", + ); + + Ok(()) + } + + /// Publish sync committee signatures. + async fn publish_sync_committee_signatures( + self, + slot: Slot, + beacon_block_root: Hash256, + validator_duties: Vec, + ) -> Result<(), ()> { + let log = self.context.log().clone(); + + let committee_signatures = validator_duties + .iter() + .filter_map(|duty| { + self.validator_store + .produce_sync_committee_signature( + slot, + beacon_block_root, + duty.validator_index, + &duty.pubkey, + ) + .map_err(|e| { + crit!( + log, + "Failed to sign sync committee signature"; + "validator_index" => duty.validator_index, + "slot" => slot, + "error" => ?e, + ); + }) + .ok() + }) + .collect::>(); + + let signatures_slice = &committee_signatures; + + self.beacon_nodes + .first_success(RequireSynced::No, |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(signatures_slice) + .await + }) + .await + .map_err(|e| { + error!( + log, + "Unable to publish sync committee messages"; + "slot" => slot, + "error" => %e, + ); + })?; + + info!( + log, + "Successfully published sync committee messages"; + "count" => committee_signatures.len(), + "head_block" => ?beacon_block_root, + "slot" => slot, + ); + + Ok(()) + } + + async fn publish_sync_committee_aggregates( + self, + slot: Slot, + beacon_block_root: Hash256, + aggregators: HashMap>, + aggregate_instant: Instant, + ) { + for (subnet_id, subnet_aggregators) in aggregators { + let service = self.clone(); + self.inner.context.executor.spawn( + service + .publish_sync_committee_aggregate_for_subnet( + slot, + beacon_block_root, + subnet_id, + subnet_aggregators, + aggregate_instant, + ) + .map(|_| ()), + "sync_committee_aggregate_publish_subnet", + ); + } + } + + async fn publish_sync_committee_aggregate_for_subnet( + self, + slot: Slot, + beacon_block_root: Hash256, + subnet_id: SyncSubnetId, + subnet_aggregators: Vec<(u64, PublicKeyBytes, SyncSelectionProof)>, + aggregate_instant: Instant, + ) -> Result<(), ()> { + sleep_until(aggregate_instant).await; + + let log = self.context.log(); + + let contribution = self + .beacon_nodes + 
.first_success(RequireSynced::No, |beacon_node| async move { + let sync_contribution_data = SyncContributionData { + slot, + beacon_block_root, + subcommittee_index: subnet_id.into(), + }; + + beacon_node + .get_validator_sync_committee_contribution::(&sync_contribution_data) + .await + }) + .await + .map_err(|e| { + crit!( + log, + "Failed to produce sync contribution"; + "slot" => slot, + "beacon_block_root" => ?beacon_block_root, + "error" => %e, + ) + })? + .ok_or_else(|| { + crit!( + log, + "No aggregate contribution found"; + "slot" => slot, + "beacon_block_root" => ?beacon_block_root, + ); + })? + .data; + + // Make `SignedContributionAndProof`s + let signed_contributions = subnet_aggregators + .into_iter() + .filter_map(|(aggregator_index, aggregator_pk, selection_proof)| { + self.validator_store + .produce_signed_contribution_and_proof( + aggregator_index, + &aggregator_pk, + contribution.clone(), + selection_proof, + ) + .map_err(|e| { + crit!( + log, + "Unable to sign sync committee contribution"; + "slot" => slot, + "error" => ?e, + ); + }) + .ok() + }) + .collect::>(); + + // Publish to the beacon node. + let signed_contributions_slice = &signed_contributions; + self.beacon_nodes + .first_success(RequireSynced::No, |beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions_slice) + .await + }) + .await + .map_err(|e| { + error!( + log, + "Unable to publish signed contributions and proofs"; + "slot" => slot, + "error" => %e, + ); + })?; + + info!( + log, + "Successfully published sync contributions"; + "subnet" => %subnet_id, + "beacon_block_root" => %beacon_block_root, + "num_signers" => contribution.aggregation_bits.num_set_bits(), + "slot" => slot, + ); + + Ok(()) + } + + fn spawn_subscription_tasks(&self) { + let service = self.clone(); + let log = self.context.log().clone(); + self.inner.context.executor.spawn( + async move { + service.publish_subscriptions().await.unwrap_or_else(|e| { + error!( + log, + "Error publishing subscriptions"; + "error" => ?e, + ) + }); + }, + "sync_committee_subscription_publish", + ); + } + + async fn publish_subscriptions(self) -> Result<(), String> { + let log = self.context.log().clone(); + let spec = &self.duties_service.spec; + let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; + + let mut duty_slots = vec![]; + let mut all_succeeded = true; + + // At the start of every epoch during the current period, re-post the subscriptions + // to the beacon node. This covers the case where the BN has forgotten the subscriptions + // due to a restart, or where the VC has switched to a fallback BN. + let current_period = sync_period_of_slot::(slot, spec)?; + + if !self.first_subscription_done.load(Ordering::Relaxed) + || slot.as_u64() % E::slots_per_epoch() == 0 + { + duty_slots.push((slot, current_period)); + } + + // Near the end of the current period, push subscriptions for the next period to the + // beacon node. We aggressively push every slot in the lead-up, as this is the main way + // that we want to ensure that the BN is subscribed (well in advance). + let lookahead_slot = slot + SUBSCRIPTION_LOOKAHEAD_EPOCHS * E::slots_per_epoch(); + + let lookahead_period = sync_period_of_slot::(lookahead_slot, spec)?; + + if lookahead_period > current_period { + duty_slots.push((lookahead_slot, lookahead_period)); + } + + if duty_slots.is_empty() { + return Ok(()); + } + + // Collect subscriptions. 
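+        // (For concreteness: with 32-slot epochs, SUBSCRIPTION_LOOKAHEAD_EPOCHS = 4 gives
+        // lookahead_slot = slot + 128, so next-period subscriptions start being re-posted
+        // on every slot from 128 slots before the period boundary.)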
+
+    async fn publish_subscriptions(self) -> Result<(), String> {
+        let log = self.context.log().clone();
+        let spec = &self.duties_service.spec;
+        let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?;
+
+        let mut duty_slots = vec![];
+        let mut all_succeeded = true;
+
+        // At the start of every epoch during the current period, re-post the subscriptions
+        // to the beacon node. This covers the case where the BN has forgotten the subscriptions
+        // due to a restart, or where the VC has switched to a fallback BN.
+        let current_period = sync_period_of_slot::<E>(slot, spec)?;
+
+        if !self.first_subscription_done.load(Ordering::Relaxed)
+            || slot.as_u64() % E::slots_per_epoch() == 0
+        {
+            duty_slots.push((slot, current_period));
+        }
+
+        // Near the end of the current period, push subscriptions for the next period to the
+        // beacon node. We re-post aggressively on every slot in the lead-up, as this is the
+        // main mechanism for ensuring that the BN is subscribed well in advance.
+        let lookahead_slot = slot + SUBSCRIPTION_LOOKAHEAD_EPOCHS * E::slots_per_epoch();
+
+        let lookahead_period = sync_period_of_slot::<E>(lookahead_slot, spec)?;
+
+        if lookahead_period > current_period {
+            duty_slots.push((lookahead_slot, lookahead_period));
+        }
+
+        if duty_slots.is_empty() {
+            return Ok(());
+        }
+
+        // Collect subscriptions.
+        let mut subscriptions = vec![];
+
+        for (duty_slot, sync_committee_period) in duty_slots {
+            debug!(
+                log,
+                "Fetching subscription duties";
+                "duty_slot" => duty_slot,
+                "current_slot" => slot,
+            );
+            match self
+                .duties_service
+                .sync_duties
+                .get_duties_for_slot::<E>(duty_slot, spec)
+            {
+                Some(duties) => subscriptions.extend(subscriptions_from_sync_duties(
+                    duties.duties,
+                    sync_committee_period,
+                    spec,
+                )),
+                None => {
+                    warn!(
+                        log,
+                        "Missing duties for subscription";
+                        "slot" => duty_slot,
+                    );
+                    all_succeeded = false;
+                }
+            }
+        }
+
+        // Post subscriptions to the BN.
+        debug!(
+            log,
+            "Posting sync subscriptions to BN";
+            "count" => subscriptions.len(),
+        );
+        let subscriptions_slice = &subscriptions;
+
+        for subscription in subscriptions_slice {
+            debug!(
+                log,
+                "Subscription";
+                "validator_index" => subscription.validator_index,
+                "validator_sync_committee_indices" => ?subscription.sync_committee_indices,
+                "until_epoch" => subscription.until_epoch,
+            );
+        }
+
+        if let Err(e) = self
+            .beacon_nodes
+            .first_success(RequireSynced::No, |beacon_node| async move {
+                beacon_node
+                    .post_validator_sync_committee_subscriptions(subscriptions_slice)
+                    .await
+            })
+            .await
+        {
+            error!(
+                log,
+                "Unable to post sync committee subscriptions";
+                "slot" => slot,
+                "error" => %e,
+            );
+            all_succeeded = false;
+        }
+
+        // Once everything has succeeded, set the latch so that subscriptions are no longer
+        // re-posted on every slot.
+        if all_succeeded {
+            self.first_subscription_done.store(true, Ordering::Relaxed);
+        }
+
+        Ok(())
+    }
+}
+
+fn sync_period_of_slot<E: EthSpec>(slot: Slot, spec: &ChainSpec) -> Result<u64, String> {
+    slot.epoch(E::slots_per_epoch())
+        .sync_committee_period(spec)
+        .map_err(|e| format!("Error computing sync period: {:?}", e))
+}
+
+fn subscriptions_from_sync_duties(
+    duties: Vec<SyncDuty>,
+    sync_committee_period: u64,
+    spec: &ChainSpec,
+) -> impl Iterator<Item = SyncCommitteeSubscription> {
+    let until_epoch = spec.epochs_per_sync_committee_period * (sync_committee_period + 1);
+    duties
+        .into_iter()
+        .map(move |duty| SyncCommitteeSubscription {
+            validator_index: duty.validator_index,
+            sync_committee_indices: duty.validator_sync_committee_indices,
+            until_epoch,
+        })
+}
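Worked numbers for the `until_epoch` arithmetic in `subscriptions_from_sync_duties` above: a subscription for period `p` remains valid until the first epoch of period `p + 1`. Mainnet's 256 epochs per sync committee period are assumed here for illustration:

fn main() {
    let epochs_per_sync_committee_period: u64 = 256;

    // A subscription for period `p` expires at the first epoch of period `p + 1`.
    let until_epoch = |period: u64| epochs_per_sync_committee_period * (period + 1);

    assert_eq!(until_epoch(0), 256); // period 0 covers epochs 0..=255
    assert_eq!(until_epoch(1), 512); // period 1 covers epochs 256..=511
}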
diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs
index 54cef8b6783..7100ee35232 100644
--- a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -1,5 +1,5 @@
 use crate::{
-    doppelganger_service::DoppelgangerService, fork_service::ForkService, http_metrics::metrics,
+    doppelganger_service::DoppelgangerService, http_metrics::metrics,
     initialized_validators::InitializedValidators,
 };
 use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString};
@@ -8,12 +8,15 @@ use slashing_protection::{NotSafe, Safe, SlashingDatabase};
 use slog::{crit, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::iter::FromIterator;
+use std::marker::PhantomData;
 use std::path::Path;
 use std::sync::Arc;
 use types::{
     attestation::Error as AttestationError, graffiti::GraffitiString, Attestation, BeaconBlock,
     ChainSpec, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes,
-    SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot,
+    SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock,
+    SignedContributionAndProof, SignedRoot, Slot, SyncCommitteeContribution, SyncCommitteeMessage,
+    SyncSelectionProof, SyncSubnetId,
 };
 use validator_dir::ValidatorDir;
@@ -69,8 +72,8 @@ pub struct ValidatorStore<T, E: EthSpec> {
     spec: Arc<ChainSpec>,
     log: Logger,
     doppelganger_service: Option<Arc<DoppelgangerService>>,
-    fork_service: ForkService<T, E>,
     slot_clock: T,
+    _phantom: PhantomData<E>,
 }
 
 impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
@@ -79,8 +82,8 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         slashing_protection: SlashingDatabase,
         genesis_validators_root: Hash256,
         spec: ChainSpec,
-        fork_service: ForkService<T, E>,
         doppelganger_service: Option<Arc<DoppelgangerService>>,
+        slot_clock: T,
         log: Logger,
     ) -> Self {
         Self {
@@ -89,10 +92,10 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             slashing_protection_last_prune: Arc::new(Mutex::new(Epoch::new(0))),
             genesis_validators_root,
             spec: Arc::new(spec),
-            log: log.clone(),
+            log,
             doppelganger_service,
-            slot_clock: fork_service.slot_clock(),
-            fork_service,
+            slot_clock,
+            _phantom: PhantomData,
         }
     }
@@ -253,8 +256,8 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         self.validators.read().num_enabled()
     }
 
-    fn fork(&self) -> Fork {
-        self.fork_service.fork()
+    fn fork(&self, epoch: Epoch) -> Fork {
+        self.spec.fork_at_epoch(epoch)
     }
 
     /// Runs `func`, providing it access to the `Keypair` corresponding to `validator_pubkey`.
@@ -301,7 +304,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         let domain = self.spec.get_domain(
             epoch,
             Domain::Randao,
-            &self.fork(),
+            &self.fork(epoch),
             self.genesis_validators_root,
         );
         let message = epoch.signing_root(domain);
@@ -334,7 +337,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         }
 
         // Check for slashing conditions.
-        let fork = self.fork();
+        let fork = self.fork(block.epoch());
         let domain = self.spec.get_domain(
             block.epoch(),
             Domain::BeaconProposer,
@@ -403,7 +406,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         }
 
         // Checking for slashing conditions.
-        let fork = self.fork();
+        let fork = self.fork(attestation.data.target.epoch);
 
         let domain = self.spec.get_domain(
             attestation.data.target.epoch,
@@ -486,8 +489,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         aggregate: Attestation<E>,
         selection_proof: SelectionProof,
     ) -> Result<SignedAggregateAndProof<E>, Error> {
-        // Take the fork early to avoid lock interleaving.
-        let fork = self.fork();
+        let fork = self.fork(aggregate.data.target.epoch);
 
         let proof = self.with_validator_keypair(validator_pubkey, move |keypair| {
             SignedAggregateAndProof::from_aggregate(
@@ -513,9 +515,6 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         validator_pubkey: PublicKeyBytes,
         slot: Slot,
     ) -> Result<SelectionProof, Error> {
-        // Take the fork early to avoid lock interleaving.
-        let fork = self.fork();
-
         // Bypass the `with_validator_keypair` function.
         //
         // This is because we don't care about doppelganger protection when it comes to selection
@@ -531,7 +530,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         let proof = SelectionProof::new::<E>(
            slot,
             &keypair.sk,
-            &fork,
+            &self.fork(slot.epoch(E::slots_per_epoch())),
             self.genesis_validators_root,
             &self.spec,
         );
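The `fork(&self, epoch)` change above swaps the old `ForkService` (which tracked a single "current" fork) for a stateless lookup on the spec, so every signature uses the fork of the epoch being signed over. A simplified sketch of that idea; the `Fork`/`Spec` types and fork parameters below are illustrative stand-ins, not Lighthouse's actual `ChainSpec`/`Fork` definitions:

// Illustrative epoch-dependent fork lookup: the answer depends on the epoch
// of the message, not on "now", which matters across the Altair transition.
#[derive(Debug, PartialEq)]
struct Fork {
    previous_version: [u8; 4],
    current_version: [u8; 4],
    epoch: u64,
}

struct Spec {
    genesis_fork_version: [u8; 4],
    altair_fork_version: [u8; 4],
    altair_fork_epoch: u64,
}

impl Spec {
    fn fork_at_epoch(&self, epoch: u64) -> Fork {
        if epoch >= self.altair_fork_epoch {
            Fork {
                previous_version: self.genesis_fork_version,
                current_version: self.altair_fork_version,
                epoch: self.altair_fork_epoch,
            }
        } else {
            Fork {
                previous_version: self.genesis_fork_version,
                current_version: self.genesis_fork_version,
                epoch: 0,
            }
        }
    }
}

fn main() {
    let spec = Spec {
        genesis_fork_version: [0, 0, 0, 0],
        altair_fork_version: [1, 0, 0, 0],
        altair_fork_epoch: 74240, // mainnet Altair epoch, for illustration
    };
    // Signing domains differ on either side of the fork epoch, which is why
    // `fork()` now takes the epoch of the object being signed.
    assert_eq!(spec.fork_at_epoch(0).current_version, [0, 0, 0, 0]);
    assert_eq!(spec.fork_at_epoch(74240).current_version, [1, 0, 0, 0]);
}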
@@ -541,6 +540,93 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         Ok(proof)
     }
 
+    /// Produce a `SyncSelectionProof` for `slot` signed by the secret key of `validator_pubkey`.
+    pub fn produce_sync_selection_proof(
+        &self,
+        validator_pubkey: &PublicKeyBytes,
+        slot: Slot,
+        subnet_id: SyncSubnetId,
+    ) -> Result<SyncSelectionProof, Error> {
+        // Bypass `with_validator_keypair`: sync committee messages are not slashable.
+        let validators = self.validators.read();
+        let voting_keypair = validators
+            .voting_keypair(validator_pubkey)
+            .ok_or(Error::UnknownPubkey(*validator_pubkey))?;
+
+        metrics::inc_counter_vec(
+            &metrics::SIGNED_SYNC_SELECTION_PROOFS_TOTAL,
+            &[metrics::SUCCESS],
+        );
+
+        Ok(SyncSelectionProof::new::<E>(
+            slot,
+            subnet_id.into(),
+            &voting_keypair.sk,
+            &self.fork(slot.epoch(E::slots_per_epoch())),
+            self.genesis_validators_root,
+            &self.spec,
+        ))
+    }
+
+    pub fn produce_sync_committee_signature(
+        &self,
+        slot: Slot,
+        beacon_block_root: Hash256,
+        validator_index: u64,
+        validator_pubkey: &PublicKeyBytes,
+    ) -> Result<SyncCommitteeMessage, Error> {
+        // Bypass `with_validator_keypair`: sync committee messages are not slashable.
+        let validators = self.validators.read();
+        let voting_keypair = validators
+            .voting_keypair(validator_pubkey)
+            .ok_or(Error::UnknownPubkey(*validator_pubkey))?;
+
+        metrics::inc_counter_vec(
+            &metrics::SIGNED_SYNC_COMMITTEE_MESSAGES_TOTAL,
+            &[metrics::SUCCESS],
+        );
+
+        Ok(SyncCommitteeMessage::new::<E>(
+            slot,
+            beacon_block_root,
+            validator_index,
+            &voting_keypair.sk,
+            &self.fork(slot.epoch(E::slots_per_epoch())),
+            self.genesis_validators_root,
+            &self.spec,
+        ))
+    }
+
+    pub fn produce_signed_contribution_and_proof(
+        &self,
+        aggregator_index: u64,
+        aggregator_pubkey: &PublicKeyBytes,
+        contribution: SyncCommitteeContribution<E>,
+        selection_proof: SyncSelectionProof,
+    ) -> Result<SignedContributionAndProof<E>, Error> {
+        // Bypass `with_validator_keypair`: sync committee messages are not slashable.
+        let validators = self.validators.read();
+        let voting_keypair = validators
+            .voting_keypair(aggregator_pubkey)
+            .ok_or(Error::UnknownPubkey(*aggregator_pubkey))?;
+        let fork = self.fork(contribution.slot.epoch(E::slots_per_epoch()));
+
+        metrics::inc_counter_vec(
+            &metrics::SIGNED_SYNC_COMMITTEE_CONTRIBUTIONS_TOTAL,
+            &[metrics::SUCCESS],
+        );
+
+        Ok(SignedContributionAndProof::from_aggregate(
+            aggregator_index,
+            contribution,
+            Some(selection_proof),
+            &voting_keypair.sk,
+            &fork,
+            self.genesis_validators_root,
+            &self.spec,
+        ))
+    }
+
     /// Prune the slashing protection database so that it remains performant.
     ///
     /// This function will only do actual pruning periodically, so it should usually be