diff --git a/Cargo.lock b/Cargo.lock index 30b0cfbfa58..90eacceacba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -226,12 +226,6 @@ dependencies = [ "syn", ] -[[package]] -name = "assert_matches" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" - [[package]] name = "async-tls" version = "0.8.0" @@ -348,7 +342,6 @@ dependencies = [ "rand 0.7.3", "rand_core 0.5.1", "rayon", - "regex", "safe_arith", "serde", "serde_derive", @@ -517,7 +510,7 @@ dependencies = [ "rand 0.7.3", "serde", "serde_derive", - "serde_hex", + "serde_utils", "tree_hash", "zeroize", ] @@ -570,6 +563,16 @@ dependencies = [ "serde", ] +[[package]] +name = "buf_redux" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", +] + [[package]] name = "bumpalo" version = "3.4.0" @@ -762,13 +765,14 @@ dependencies = [ "eth2_ssz", "futures 0.3.5", "genesis", + "http_api", + "http_metrics", "lazy_static", "lighthouse_metrics", "network", "parking_lot 0.11.0", "prometheus", "reqwest", - "rest_api", "serde", "serde_derive", "serde_yaml", @@ -1448,6 +1452,21 @@ dependencies = [ "web3", ] +[[package]] +name = "eth2" +version = "0.1.0" +dependencies = [ + "eth2_libp2p", + "hex 0.4.2", + "procinfo", + "proto_array", + "psutil", + "reqwest", + "serde", + "serde_json", + "types", +] + [[package]] name = "eth2_config" version = "0.2.0" @@ -1587,7 +1606,7 @@ dependencies = [ "eth2_ssz", "serde", "serde_derive", - "serde_hex", + "serde_utils", "tree_hash", "tree_hash_derive", "typenum", @@ -2135,6 +2154,31 @@ dependencies = [ "tokio 0.2.22", ] +[[package]] +name = "headers" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f" +dependencies = [ + 
"base64 0.12.3", + "bitflags 1.2.1", + "bytes 0.5.6", + "headers-core", + "http 0.2.1", + "mime 0.3.16", + "sha-1 0.8.2", + "time 0.1.44", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http 0.2.1", +] + [[package]] name = "heck" version = "0.3.1" @@ -2256,6 +2300,58 @@ dependencies = [ "http 0.2.1", ] +[[package]] +name = "http_api" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "discv5", + "environment", + "eth1", + "eth2", + "eth2_libp2p", + "fork_choice", + "hex 0.4.2", + "lazy_static", + "lighthouse_metrics", + "lighthouse_version", + "network", + "parking_lot 0.11.0", + "serde", + "slog", + "slot_clock", + "state_processing", + "store", + "tokio 0.2.22", + "tree_hash", + "types", + "warp", + "warp_utils", +] + +[[package]] +name = "http_metrics" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "environment", + "eth2", + "eth2_libp2p", + "lazy_static", + "lighthouse_metrics", + "lighthouse_version", + "prometheus", + "reqwest", + "serde", + "slog", + "slot_clock", + "store", + "tokio 0.2.22", + "types", + "warp", + "warp_utils", +] + [[package]] name = "httparse" version = "1.3.4" @@ -2435,6 +2531,15 @@ dependencies = [ "hashbrown 0.9.0", ] +[[package]] +name = "input_buffer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" +dependencies = [ + "bytes 0.5.6", +] + [[package]] name = "instant" version = "0.1.7" @@ -3244,6 +3349,24 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" +[[package]] +name = "multipart" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8209c33c951f07387a8497841122fc6f712165e3f9bda3e6be4645b58188f676" +dependencies = [ + "buf_redux", + "httparse", + "log 0.4.11", + "mime 0.3.16", + "mime_guess", + "quick-error", + "rand 0.6.5", + "safemem", + "tempfile", + "twoway", +] + [[package]] name = "multistream-select" version = "0.8.2" @@ -3324,7 +3447,6 @@ dependencies = [ "num_cpus", "parking_lot 0.11.0", "rand 0.7.3", - "rest_types", "rlp", "slog", "sloggers", @@ -3357,10 +3479,10 @@ version = "0.2.0" dependencies = [ "beacon_node", "environment", + "eth2", "eth2_config", "futures 0.3.5", "genesis", - "remote_beacon_node", "reqwest", "serde", "tempdir", @@ -4039,6 +4161,25 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "rand" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.7", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc 0.1.0", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift 0.1.1", + "winapi 0.3.9", +] + [[package]] name = "rand" version = "0.7.3" @@ -4047,9 +4188,19 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom", "libc", - "rand_chacha", + "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand_chacha" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.3.1", ] [[package]] @@ -4086,6 +4237,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_hc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -4095,6 
+4255,59 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_isaac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_jitter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +dependencies = [ + "libc", + "rand_core 0.4.2", + "winapi 0.3.9", +] + +[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +dependencies = [ + "cloudabi 0.0.3", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi 0.3.9", +] + +[[package]] +name = "rand_pcg" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.4.2", +] + +[[package]] +name = "rand_xorshift" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_xorshift" version = "0.2.0" @@ -4182,24 +4395,6 @@ version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" -[[package]] -name = "remote_beacon_node" -version = "0.2.0" -dependencies = [ - "eth2_config", - "eth2_ssz", - "futures 0.3.5", - "hex 0.4.2", - "operation_pool", - "proto_array", - "reqwest", - "rest_types", - "serde", - "serde_json", - "types", - "url 2.1.1", -] - [[package]] name = "remove_dir_all" version = "0.5.3" @@ -4245,73 +4440,6 @@ dependencies = [ "winreg", ] 
-[[package]] -name = "rest_api" -version = "0.2.0" -dependencies = [ - "assert_matches", - "beacon_chain", - "bls", - "bus", - "environment", - "eth2_config", - "eth2_libp2p", - "eth2_ssz", - "eth2_ssz_derive", - "futures 0.3.5", - "hex 0.4.2", - "http 0.2.1", - "hyper 0.13.8", - "itertools 0.9.0", - "lazy_static", - "lighthouse_metrics", - "lighthouse_version", - "network", - "node_test_rig", - "operation_pool", - "parking_lot 0.11.0", - "remote_beacon_node", - "rest_types", - "serde", - "serde_json", - "serde_yaml", - "slog", - "slog-async", - "slog-term", - "slot_clock", - "state_processing", - "store", - "tokio 0.2.22", - "tree_hash", - "types", - "uhttp_sse", - "url 2.1.1", -] - -[[package]] -name = "rest_types" -version = "0.2.0" -dependencies = [ - "beacon_chain", - "bls", - "environment", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "hyper 0.13.8", - "procinfo", - "psutil", - "rayon", - "serde", - "serde_json", - "serde_yaml", - "state_processing", - "store", - "tokio 0.2.22", - "tree_hash", - "types", -] - [[package]] name = "ring" version = "0.16.12" @@ -4600,14 +4728,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_hex" -version = "0.2.0" -dependencies = [ - "hex 0.4.2", - "serde", -] - [[package]] name = "serde_json" version = "1.0.57" @@ -4646,6 +4766,7 @@ dependencies = [ name = "serde_utils" version = "0.1.0" dependencies = [ + "hex 0.4.2", "serde", "serde_derive", "serde_json", @@ -5674,6 +5795,19 @@ dependencies = [ "tokio 0.2.22", ] +[[package]] +name = "tokio-tungstenite" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c" +dependencies = [ + "futures-util", + "log 0.4.11", + "pin-project", + "tokio 0.2.22", + "tungstenite", +] + [[package]] name = "tokio-udp" version = "0.1.6" @@ -5774,6 +5908,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-futures" +version = "0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +dependencies = [ + "pin-project", + "tracing", +] + [[package]] name = "trackable" version = "1.0.0" @@ -5827,6 +5971,34 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "tungstenite" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23" +dependencies = [ + "base64 0.12.3", + "byteorder", + "bytes 0.5.6", + "http 0.2.1", + "httparse", + "input_buffer", + "log 0.4.11", + "rand 0.7.3", + "sha-1 0.9.1", + "url 2.1.1", + "utf-8", +] + +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + [[package]] name = "typeable" version = "0.1.2" @@ -5862,13 +6034,15 @@ dependencies = [ "log 0.4.11", "merkle_proof", "rand 0.7.3", - "rand_xorshift", + "rand_xorshift 0.2.0", "rayon", + "regex", "rusqlite", "safe_arith", "serde", "serde_derive", "serde_json", + "serde_utils", "serde_yaml", "slog", "swap_or_not_shuffle", @@ -5878,12 +6052,6 @@ dependencies = [ "tree_hash_derive", ] -[[package]] -name = "uhttp_sse" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ff93345ba2206230b1bb1aa3ece1a63dd9443b7531024575d16a0680a59444" - [[package]] name = "uint" version = "0.8.5" @@ -6024,6 +6192,18 @@ dependencies = [ "percent-encoding 2.1.0", ] +[[package]] +name = "urlencoding" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" + +[[package]] +name = "utf-8" +version = "0.7.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" + [[package]] name = "uuid" version = "0.8.1" @@ -6045,6 +6225,7 @@ dependencies = [ "deposit_contract", "dirs", "environment", + "eth2", "eth2_config", "eth2_interop_keypairs", "eth2_keystore", @@ -6057,8 +6238,6 @@ dependencies = [ "logging", "parking_lot 0.11.0", "rayon", - "remote_beacon_node", - "rest_types", "serde", "serde_derive", "serde_json", @@ -6153,6 +6332,45 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407" +dependencies = [ + "bytes 0.5.6", + "futures 0.3.5", + "headers", + "http 0.2.1", + "hyper 0.13.8", + "log 0.4.11", + "mime 0.3.16", + "mime_guess", + "multipart", + "pin-project", + "scoped-tls 1.0.0", + "serde", + "serde_json", + "serde_urlencoded", + "tokio 0.2.22", + "tokio-tungstenite", + "tower-service", + "tracing", + "tracing-futures", + "urlencoding", +] + +[[package]] +name = "warp_utils" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "eth2", + "state_processing", + "types", + "warp", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index 92fb5bccf39..6ad7c74664a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,9 @@ members = [ "beacon_node/client", "beacon_node/eth1", "beacon_node/eth2_libp2p", + "beacon_node/http_api", + "beacon_node/http_metrics", "beacon_node/network", - "beacon_node/rest_api", "beacon_node/store", "beacon_node/timer", "beacon_node/websocket_server", @@ -20,6 +21,7 @@ members = [ "common/compare_fields", "common/compare_fields_derive", "common/deposit_contract", + "common/eth2", "common/eth2_config", "common/eth2_interop_keypairs", "common/eth2_testnet_config", @@ -29,10 +31,9 @@ members = [ "common/lighthouse_version", 
"common/logging", "common/lru_cache", - "common/remote_beacon_node", - "common/rest_types", "common/slot_clock", "common/test_random_derive", + "common/warp_utils", "common/validator_dir", "consensus/cached_tree_hash", @@ -43,7 +44,6 @@ members = [ "consensus/ssz", "consensus/ssz_derive", "consensus/ssz_types", - "consensus/serde_hex", "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 05ae819c49c..04e22f4268f 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -58,4 +58,3 @@ environment = { path = "../../lighthouse/environment" } bus = "2.2.3" derivative = "2.1.1" itertools = "0.9.0" -regex = "1.3.9" diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index b81daa4bdb8..e019c92e841 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -28,8 +28,7 @@ use crate::{ beacon_chain::{ - ATTESTATION_CACHE_LOCK_TIMEOUT, HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, observed_attestations::ObserveOutcome, @@ -37,12 +36,10 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::verify_signature_sets; -use slog::debug; use slot_clock::SlotClock; use state_processing::{ common::get_indexed_attestation, per_block_processing::errors::AttestationValidationError, - per_slot_processing, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, @@ -52,7 +49,7 @@ use std::borrow::Cow; use tree_hash::TreeHash; use types::{ Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, 
IndexedAttestation, - RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, + SelectionProof, SignedAggregateAndProof, Slot, SubnetId, }; /// Returned when an attestation was not successfully verified. It might not have been verified for @@ -251,6 +248,7 @@ pub struct VerifiedAggregatedAttestation { pub struct VerifiedUnaggregatedAttestation { attestation: Attestation, indexed_attestation: IndexedAttestation, + subnet_id: SubnetId, } /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive @@ -260,6 +258,7 @@ impl Clone for VerifiedUnaggregatedAttestation { Self { attestation: self.attestation.clone(), indexed_attestation: self.indexed_attestation.clone(), + subnet_id: self.subnet_id, } } } @@ -412,6 +411,11 @@ impl VerifiedAggregatedAttestation { pub fn attestation(&self) -> &Attestation { &self.signed_aggregate.message.aggregate } + + /// Returns the underlying `signed_aggregate`. + pub fn aggregate(&self) -> &SignedAggregateAndProof { + &self.signed_aggregate + } } impl VerifiedUnaggregatedAttestation { @@ -422,7 +426,7 @@ impl VerifiedUnaggregatedAttestation { /// verify that it was received on the correct subnet. pub fn verify( attestation: Attestation, - subnet_id: SubnetId, + subnet_id: Option, chain: &BeaconChain, ) -> Result { // Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a @@ -454,13 +458,15 @@ impl VerifiedUnaggregatedAttestation { ) .map_err(BeaconChainError::from)?; - // Ensure the attestation is from the correct subnet. - if subnet_id != expected_subnet_id { - return Err(Error::InvalidSubnetId { - received: subnet_id, - expected: expected_subnet_id, - }); - } + // If a subnet was specified, ensure that subnet is correct. 
+ if let Some(subnet_id) = subnet_id { + if subnet_id != expected_subnet_id { + return Err(Error::InvalidSubnetId { + received: subnet_id, + expected: expected_subnet_id, + }); + } + }; let validator_index = *indexed_attestation .attesting_indices @@ -505,6 +511,7 @@ impl VerifiedUnaggregatedAttestation { Ok(Self { attestation, indexed_attestation, + subnet_id: expected_subnet_id, }) } @@ -513,6 +520,11 @@ impl VerifiedUnaggregatedAttestation { chain.add_to_naive_aggregation_pool(self) } + /// Returns the correct subnet for the attestation. + pub fn subnet_id(&self) -> SubnetId { + self.subnet_id + } + /// Returns the wrapped `attestation`. pub fn attestation(&self) -> &Attestation { &self.attestation @@ -527,7 +539,8 @@ impl VerifiedUnaggregatedAttestation { } } -/// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain. +/// Returns `Ok(shuffling_id)` if the `attestation.data.beacon_block_root` is known to this chain. +/// You can use this `shuffling_id` to read from the shuffling cache. /// /// The block root may not be known for two reasons: /// @@ -556,6 +569,7 @@ fn verify_head_block_is_known( }); } } + Ok(()) } else { Err(Error::UnknownHeadBlock { @@ -711,7 +725,7 @@ type CommitteesPerSlot = u64; /// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the /// public keys cached in the `chain`. -pub fn obtain_indexed_attestation_and_committees_per_slot( +fn obtain_indexed_attestation_and_committees_per_slot( chain: &BeaconChain, attestation: &Attestation, ) -> Result<(IndexedAttestation, CommitteesPerSlot), Error> { @@ -731,7 +745,7 @@ pub fn obtain_indexed_attestation_and_committees_per_slot( /// /// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state /// from disk and then update the `shuffling_cache`. 
-pub fn map_attestation_committee<'a, T, F, R>( +fn map_attestation_committee<'a, T, F, R>( chain: &'a BeaconChain, attestation: &Attestation, map_fn: F, @@ -750,104 +764,23 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - let target_block = chain - .fork_choice - .read() - .get_block(&target.root) - .ok_or_else(|| Error::UnknownTargetRoot(target.root))?; - - // Obtain the shuffling cache, timing how long we wait. - let cache_wait_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); - - let mut shuffling_cache = chain - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?; - - metrics::stop_timer(cache_wait_timer); - - if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) { - let committees_per_slot = committee_cache.committees_per_slot(); - committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, - }) - }) - } else { - // Drop the shuffling cache to avoid holding the lock for any longer than - // required. - drop(shuffling_cache); - - debug!( - chain.log, - "Attestation processing cache miss"; - "attn_epoch" => attestation_epoch.as_u64(), - "target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(), - ); - - let state_read_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); - - let mut state = chain - .store - .get_inconsistent_state_for_attestation_verification_only( - &target_block.state_root, - Some(target_block.slot), - ) - .map_err(BeaconChainError::from)? 
- .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?; - - metrics::stop_timer(state_read_timer); - let state_skip_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); - - while state.current_epoch() + 1 < attestation_epoch { - // Here we tell `per_slot_processing` to skip hashing the state and just - // use the zero hash instead. - // - // The state roots are not useful for the shuffling, so there's no need to - // compute them. - per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) - .map_err(BeaconChainError::from)?; - } - - metrics::stop_timer(state_skip_timer); - let committee_building_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), attestation_epoch) - .map_err(BeaconChainError::IncorrectStateForAttestation)?; - - state - .build_committee_cache(relative_epoch, &chain.spec) - .map_err(BeaconChainError::from)?; - - let committee_cache = state - .committee_cache(relative_epoch) - .map_err(BeaconChainError::from)?; - - chain - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)? 
- .insert(attestation_epoch, target.root, committee_cache); - - metrics::stop_timer(committee_building_timer); - - let committees_per_slot = committee_cache.committees_per_slot(); - committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, - }) - }) + if !chain.fork_choice.read().contains_block(&target.root) { + return Err(Error::UnknownTargetRoot(target.root)); } + + chain + .with_committee_cache(target.root, attestation_epoch, |committee_cache| { + let committees_per_slot = committee_cache.committees_per_slot(); + + Ok(committee_cache + .get_beacon_committee(attestation.data.slot, attestation.data.index) + .map(|committee| map_fn((committee, committees_per_slot))) + .unwrap_or_else(|| { + Err(Error::NoCommitteeForSlotAndIndex { + slot: attestation.data.slot, + index: attestation.data.index, + }) + })) + }) + .map_err(BeaconChainError::from)? 
} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1caaec5fea4..952cd5402dc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -21,7 +21,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_fork_choice::PersistedForkChoice; -use crate::shuffling_cache::ShufflingCache; +use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -31,7 +31,6 @@ use fork_choice::ForkChoice; use itertools::process_results; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use regex::bytes::Regex; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ @@ -201,6 +200,8 @@ pub struct BeaconChain { pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, + /// The root of the genesis state. + pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, @@ -459,6 +460,30 @@ impl BeaconChain { } } + /// Returns the block at the given slot, if any. Only returns blocks in the canonical chain. + /// + /// ## Errors + /// + /// May return a database error. + pub fn state_root_at_slot(&self, slot: Slot) -> Result, Error> { + process_results(self.rev_iter_state_roots()?, |mut iter| { + iter.find(|(_, this_slot)| *this_slot == slot) + .map(|(root, _)| root) + }) + } + + /// Returns the block root at the given slot, if any. Only returns roots in the canonical chain. + /// + /// ## Errors + /// + /// May return a database error. 
+ pub fn block_root_at_slot(&self, slot: Slot) -> Result, Error> { + process_results(self.rev_iter_block_roots()?, |mut iter| { + iter.find(|(_, this_slot)| *this_slot == slot) + .map(|(root, _)| root) + }) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -506,6 +531,30 @@ impl BeaconChain { f(&head_lock) } + /// Returns the beacon block root at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block_root(&self) -> Result { + self.with_head(|s| Ok(s.beacon_block_root)) + } + + /// Returns the beacon block at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block(&self) -> Result, Error> { + self.with_head(|s| Ok(s.beacon_block.clone())) + } + + /// Returns the beacon state at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_state(&self) -> Result, Error> { + self.with_head(|s| { + Ok(s.beacon_state + .clone_with(CloneConfig::committee_caches_only())) + }) + } + /// Returns info representing the head block and state. /// /// A summarized version of `Self::head` that involves less cloning. @@ -719,46 +768,20 @@ impl BeaconChain { .map_err(Into::into) } - /// Returns the attestation slot and committee index for a given validator index. + /// Returns the attestation duties for a given validator index. /// /// Information is read from the current state, so only information from the present and prior /// epoch is available. 
- pub fn validator_attestation_slot_and_index( + pub fn validator_attestation_duty( &self, validator_index: usize, epoch: Epoch, - ) -> Result, Error> { - let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); - let head_state = &self.head()?.beacon_state; - - let mut state = if epoch == as_epoch(head_state.slot) { - self.head()?.beacon_state - } else { - // The block proposer shuffling is not affected by the state roots, so we don't need to - // calculate them. - self.state_at_slot( - epoch.start_slot(T::EthSpec::slots_per_epoch()), - StateSkipConfig::WithoutStateRoots, - )? - }; - - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - - if as_epoch(state.slot) != epoch { - return Err(Error::InvariantViolated(format!( - "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", - as_epoch(state.slot), - epoch - ))); - } + ) -> Result, Error> { + let head_block_root = self.head_beacon_block_root()?; - if let Some(attestation_duty) = - state.get_attestation_duties(validator_index, RelativeEpoch::Current)? - { - Ok(Some((attestation_duty.slot, attestation_duty.index))) - } else { - Ok(None) - } + self.with_committee_cache(head_block_root, epoch, |committee_cache| { + Ok(committee_cache.get_attestation_duties(validator_index)) + }) } /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. @@ -774,6 +797,20 @@ impl BeaconChain { .map_err(Into::into) } + /// Returns an aggregated `Attestation`, if any, that has a matching + /// `attestation.data.tree_hash_root()`. + /// + /// The attestation will be obtained from `self.naive_aggregation_pool`. + pub fn get_aggregated_attestation_by_slot_and_root( + &self, + slot: Slot, + attestation_data_root: &Hash256, + ) -> Option> { + self.naive_aggregation_pool + .read() + .get_by_slot_and_root(slot, attestation_data_root) + } + /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. 
/// /// The produced `Attestation` will not be valid until it has been signed by exactly one @@ -898,7 +935,7 @@ impl BeaconChain { pub fn verify_unaggregated_attestation_for_gossip( &self, attestation: Attestation, - subnet_id: SubnetId, + subnet_id: Option, ) -> Result, AttestationError> { metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS); let _timer = @@ -1320,11 +1357,7 @@ impl BeaconChain { block: SignedBeaconBlock, ) -> Result, BlockError> { let slot = block.message.slot; - #[allow(clippy::invalid_regex)] - let re = Regex::new("\\p{C}").expect("regex is valid"); - let graffiti_string = - String::from_utf8_lossy(&re.replace_all(&block.message.body.graffiti[..], &b""[..])) - .to_string(); + let graffiti_string = block.message.body.graffiti.as_utf8_lossy(); match GossipVerifiedBlock::new(block, self) { Ok(verified) => { @@ -1449,8 +1482,7 @@ impl BeaconChain { ) -> Result> { let signed_block = fully_verified_block.block; let block_root = fully_verified_block.block_root; - let state = fully_verified_block.state; - let parent_block = fully_verified_block.parent_block; + let mut state = fully_verified_block.state; let current_slot = self.slot()?; let mut ops = fully_verified_block.intermediate_states; @@ -1482,29 +1514,25 @@ impl BeaconChain { .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; - // If the imported block is in the previous or current epochs (according to the - // wall-clock), check to see if this is the first block of the epoch. If so, add the - // committee to the shuffling cache. - if state.current_epoch() + 1 >= self.epoch()? 
- && parent_block.slot().epoch(T::EthSpec::slots_per_epoch()) != state.current_epoch() - { - let mut shuffling_cache = self - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)?; - - let committee_cache = state.committee_cache(RelativeEpoch::Current)?; + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. + for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] { + let shuffling_id = ShufflingId::new(block_root, &state, *relative_epoch)?; - let epoch_start_slot = state - .current_epoch() - .start_slot(T::EthSpec::slots_per_epoch()); - let target_root = if state.slot == epoch_start_slot { - block_root - } else { - *state.get_block_root(epoch_start_slot)? - }; - - shuffling_cache.insert(state.current_epoch(), target_root, committee_cache); + let shuffling_is_cached = self + .shuffling_cache + .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .contains(&shuffling_id); + + if !shuffling_is_cached { + state.build_committee_cache(*relative_epoch, &self.spec)?; + let committee_cache = state.committee_cache(*relative_epoch)?; + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .insert(shuffling_id, committee_cache); + } } let mut fork_choice = self.fork_choice.write(); @@ -1992,6 +2020,128 @@ impl BeaconChain { Ok(()) } + /// Runs the `map_fn` with the committee cache at `shuffling_epoch` if the head of the chain is + /// `head_block_root`. + /// + /// It's not necessary that `head_block_root` matches our current view of the chain, it can be + /// any block that is: + /// + /// - Known to us. + /// - The finalized block a descendant of it. + /// + /// It would be quite common for attestation verification operations to use a `head_block_root` + /// that differs from our view of the head. 
+ /// + /// ## Important + /// + /// This function is **not** suitable for determining proposer duties. + /// + /// ## Notes + /// + /// This function exists in this odd "map" pattern because efficiently obtaining a committee + /// can be complex. It might involve reading straight from the `beacon_chain.shuffling_cache` + /// or it might involve reading it from a state from the DB. Due to the complexities of + /// `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here. + /// + /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the + /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`. + pub(crate) fn with_committee_cache( + &self, + head_block_root: Hash256, + shuffling_epoch: Epoch, + map_fn: F, + ) -> Result + where + F: Fn(&CommitteeCache) -> Result, + { + let head_block = self + .fork_choice + .read() + .get_block(&head_block_root) + .ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?; + + let shuffling_id = BlockShufflingIds { + current: head_block.current_epoch_shuffling_id.clone(), + next: head_block.next_epoch_shuffling_id.clone(), + } + .id_for_epoch(shuffling_epoch) + .ok_or_else(|| Error::InvalidShufflingId { + shuffling_epoch, + head_block_epoch: head_block.slot.epoch(T::EthSpec::slots_per_epoch()), + })?; + + // Obtain the shuffling cache, timing how long we wait. + let cache_wait_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); + + let mut shuffling_cache = self + .shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)?; + + metrics::stop_timer(cache_wait_timer); + + if let Some(committee_cache) = shuffling_cache.get(&shuffling_id) { + map_fn(committee_cache) + } else { + // Drop the shuffling cache to avoid holding the lock for any longer than + // required. 
+ drop(shuffling_cache); + + debug!( + self.log, + "Committee cache miss"; + "shuffling_epoch" => shuffling_epoch.as_u64(), + "head_block_root" => head_block_root.to_string(), + ); + + let state_read_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); + + let mut state = self + .store + .get_inconsistent_state_for_attestation_verification_only( + &head_block.state_root, + Some(head_block.slot), + )? + .ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?; + + metrics::stop_timer(state_read_timer); + let state_skip_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); + + while state.current_epoch() + 1 < shuffling_epoch { + // Here we tell `per_slot_processing` to skip hashing the state and just + // use the zero hash instead. + // + // The state roots are not useful for the shuffling, so there's no need to + // compute them. + per_slot_processing(&mut state, Some(Hash256::zero()), &self.spec) + .map_err(Error::from)?; + } + + metrics::stop_timer(state_skip_timer); + let committee_building_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); + + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), shuffling_epoch) + .map_err(Error::IncorrectStateForAttestation)?; + + state.build_committee_cache(relative_epoch, &self.spec)?; + + let committee_cache = state.committee_cache(relative_epoch)?; + + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .insert(shuffling_id, committee_cache); + + metrics::stop_timer(committee_building_timer); + + map_fn(&committee_cache) + } + } + /// Returns `true` if the given block root has not been processed. 
pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { Ok(!self diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ac9d1c4b1fd..8600d3a7628 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -561,6 +561,7 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, genesis_validators_root: canonical_head.beacon_state.genesis_validators_root, + genesis_state_root: canonical_head.beacon_state_root, canonical_head: TimeoutRwLock::new(canonical_head.clone()), genesis_block_root, fork_choice: RwLock::new(fork_choice), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 96f1c9a8411..6eb7bceeb21 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -83,6 +83,10 @@ pub enum BeaconChainError { ObservedBlockProducersError(ObservedBlockProducersError), PruningError(PruningError), ArithError(ArithError), + InvalidShufflingId { + shuffling_epoch: Epoch, + head_block_epoch: Epoch, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index c561141a1de..deeb779b1eb 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -1,6 +1,9 @@ use crate::metrics; use std::collections::HashMap; -use types::{Attestation, AttestationData, EthSpec, Slot}; +use tree_hash::TreeHash; +use types::{Attestation, AttestationData, EthSpec, Hash256, Slot}; + +type AttestationDataRoot = Hash256; /// The number of slots that will be stored in the pool. /// @@ -53,7 +56,7 @@ pub enum Error { /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all /// `attestation` are from the same slot. 
struct AggregatedAttestationMap { - map: HashMap>, + map: HashMap>, } impl AggregatedAttestationMap { @@ -87,7 +90,9 @@ impl AggregatedAttestationMap { return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); } - if let Some(existing_attestation) = self.map.get_mut(&a.data) { + let attestation_data_root = a.data.tree_hash_root(); + + if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) { if existing_attestation .aggregation_bits .get(committee_index) @@ -107,7 +112,7 @@ impl AggregatedAttestationMap { )); } - self.map.insert(a.data.clone(), a.clone()); + self.map.insert(attestation_data_root, a.clone()); Ok(InsertOutcome::NewAttestationData { committee_index }) } } @@ -116,7 +121,12 @@ impl AggregatedAttestationMap { /// /// The given `a.data.slot` must match the slot that `self` was initialized with. pub fn get(&self, data: &AttestationData) -> Result>, Error> { - Ok(self.map.get(data).cloned()) + Ok(self.map.get(&data.tree_hash_root()).cloned()) + } + + /// Returns an aggregated `Attestation` with the given `root`, if any. + pub fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation> { + self.map.get(root) } /// Iterate all attestations in `self`. @@ -228,6 +238,18 @@ impl NaiveAggregationPool { .unwrap_or_else(|| Ok(None)) } + /// Returns an aggregated `Attestation` with the given `data`, if any. + pub fn get_by_slot_and_root( + &self, + slot: Slot, + root: &AttestationDataRoot, + ) -> Option> { + self.maps + .iter() + .find(|(map_slot, _)| **map_slot == slot) + .and_then(|(_slot, map)| map.get_by_root(root).cloned()) + } + /// Iterate all attestations in all slots of `self`. 
pub fn iter(&self) -> impl Iterator> { self.maps.iter().map(|(_slot, map)| map.iter()).flatten() diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index d8b6e8706e1..b9b41830ca4 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,6 +1,6 @@ use crate::metrics; use lru::LruCache; -use types::{beacon_state::CommitteeCache, Epoch, Hash256}; +use types::{beacon_state::CommitteeCache, Epoch, ShufflingId}; /// The size of the LRU cache that stores committee caches for quicker verification. /// @@ -14,7 +14,7 @@ const CACHE_SIZE: usize = 16; /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like /// a find/replace error. pub struct ShufflingCache { - cache: LruCache<(Epoch, Hash256), CommitteeCache>, + cache: LruCache, } impl ShufflingCache { @@ -24,8 +24,8 @@ impl ShufflingCache { } } - pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> { - let opt = self.cache.get(&(epoch, root)); + pub fn get(&mut self, key: &ShufflingId) -> Option<&CommitteeCache> { + let opt = self.cache.get(key); if opt.is_some() { metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); @@ -36,11 +36,38 @@ impl ShufflingCache { opt } - pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) { - let key = (epoch, root); + pub fn contains(&self, key: &ShufflingId) -> bool { + self.cache.contains(key) + } + pub fn insert(&mut self, key: ShufflingId, committee_cache: &CommitteeCache) { if !self.cache.contains(&key) { self.cache.put(key, committee_cache.clone()); } } } + +/// Contains the shuffling IDs for a beacon block. +pub struct BlockShufflingIds { + pub current: ShufflingId, + pub next: ShufflingId, +} + +impl BlockShufflingIds { + /// Returns the shuffling ID for the given epoch. + /// + /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`. 
+ pub fn id_for_epoch(&self, epoch: Epoch) -> Option { + if epoch == self.current.shuffling_epoch { + Some(self.current.clone()) + } else if epoch == self.next.shuffling_epoch { + Some(self.next.clone()) + } else if epoch > self.next.shuffling_epoch { + let mut shuffling_id = self.next.clone(); + shuffling_id.shuffling_epoch = epoch; + Some(shuffling_id) + } else { + None + } + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8690c2e8d2d..a6b0d156a74 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -26,9 +26,11 @@ use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, Mem use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, Epoch, - EthSpec, Hash256, Keypair, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockHash, SignedRoot, Slot, SubnetId, + AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState, + BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Hash256, IndexedAttestation, + Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList, + VoluntaryExit, }; pub use types::test_utils::generate_deterministic_keypairs; @@ -129,7 +131,7 @@ impl BeaconChainHarness> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let config = StoreConfig::default(); @@ -193,7 +195,7 @@ impl BeaconChainHarness> { let decorator = 
slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap(); @@ -238,7 +240,7 @@ impl BeaconChainHarness> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let chain = BeaconChainBuilder::new(eth_spec_instance) @@ -397,7 +399,7 @@ where // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce // different blocks each time. 
- self.chain.set_graffiti(self.rng.gen::<[u8; 32]>()); + self.chain.set_graffiti(self.rng.gen::<[u8; 32]>().into()); let randao_reveal = { let epoch = slot.epoch(E::slots_per_epoch()); @@ -442,8 +444,8 @@ where let committee_count = state.get_committee_count_at_slot(state.slot).unwrap(); state - .get_beacon_committees_at_slot(state.slot) - .unwrap() + .get_beacon_committees_at_slot(attestation_slot) + .expect("should get committees") .iter() .map(|bc| { bc.committee @@ -601,6 +603,94 @@ where .collect() } + pub fn make_attester_slashing(&self, validator_indices: Vec) -> AttesterSlashing { + let mut attestation_1 = IndexedAttestation { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + source: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + }, + signature: AggregateSignature::infinity(), + }; + + let mut attestation_2 = attestation_1.clone(); + attestation_2.data.index += 1; + + for attestation in &mut [&mut attestation_1, &mut attestation_2] { + for &i in &attestation.attesting_indices { + let sk = &self.validators_keypairs[i as usize].sk; + + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } + + AttesterSlashing { + attestation_1, + attestation_2, + } + } + + pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { + let mut block_header_1 = self + .chain + .head_beacon_block() + .unwrap() + .message + .block_header(); + block_header_1.proposer_index = validator_index; + + let mut block_header_2 = 
block_header_1.clone(); + block_header_2.state_root = Hash256::zero(); + + let sk = &self.validators_keypairs[validator_index as usize].sk; + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let mut signed_block_headers = vec![block_header_1, block_header_2] + .into_iter() + .map(|block_header| { + block_header.sign::(&sk, &fork, genesis_validators_root, &self.chain.spec) + }) + .collect::>(); + + ProposerSlashing { + signed_header_2: signed_block_headers.remove(1), + signed_header_1: signed_block_headers.remove(0), + } + } + + pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { + let sk = &self.validators_keypairs[validator_index as usize].sk; + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + VoluntaryExit { + epoch, + validator_index, + } + .sign(sk, &fork, genesis_validators_root, &self.chain.spec) + } + pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock) -> SignedBeaconBlockHash { assert_eq!(self.chain.slot().unwrap(), slot); let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into(); @@ -612,7 +702,10 @@ where for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() { for (attestation, subnet_id) in unaggregated_attestations { self.chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) + .verify_unaggregated_attestation_for_gossip( + attestation.clone(), + Some(subnet_id), + ) .unwrap() .add_to_pool(&self.chain) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index ec2200b1d77..fa9fe589be0 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -568,7 +568,7 @@ fn unaggregated_gossip_verification() { 
matches!( harness .chain - .verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter) + .verify_unaggregated_attestation_for_gossip($attn_getter, Some($subnet_getter)) .err() .expect(&format!( "{} should error during verify_unaggregated_attestation_for_gossip", @@ -742,7 +742,7 @@ fn unaggregated_gossip_verification() { harness .chain - .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id) + .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), Some(subnet_id)) .expect("valid attestation should be verified"); /* @@ -831,6 +831,6 @@ fn attestation_that_skips_epochs() { harness .chain - .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) .expect("should gossip verify attestation that skips slots"); } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index caa2f9d6cfc..e9006a6268d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -326,7 +326,7 @@ fn epoch_boundary_state_attestation_processing() { let res = harness .chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); + .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); let expected_attestation_slot = attestation.data.slot; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 12f1c4364a4..721eb409167 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -463,7 +463,7 @@ fn attestations_with_increasing_slots() { for (attestation, subnet_id) in attestations.into_iter().flatten() { let res = harness .chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); + 
.verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); let expected_attestation_slot = attestation.data.slot; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index de6f7e59d76..3b5ce51658c 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -14,7 +14,6 @@ store = { path = "../store" } network = { path = "../network" } timer = { path = "../timer" } eth2_libp2p = { path = "../eth2_libp2p" } -rest_api = { path = "../rest_api" } parking_lot = "0.11.0" websocket_server = { path = "../websocket_server" } prometheus = "0.9.0" @@ -41,3 +40,5 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } time = "0.2.16" bus = "2.2.3" +http_api = { path = "../http_api" } +http_metrics = { path = "../http_metrics" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 16e7a026467..2196fddcf6d 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -13,15 +13,14 @@ use beacon_chain::{ use bus::Bus; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; -use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; use genesis::{interop_genesis_state, Eth1GenesisService}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use parking_lot::Mutex; -use slog::info; +use slog::{debug, info}; use ssz::Decode; use std::net::SocketAddr; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; @@ -61,7 +60,10 @@ pub struct ClientBuilder { event_handler: Option, network_globals: Option>>, network_send: Option>>, - http_listen_addr: Option, + db_path: Option, + freezer_db_path: Option, + http_api_config: http_api::Config, + http_metrics_config: http_metrics::Config, websocket_listen_addr: Option, eth_spec_instance: T::EthSpec, } 
@@ -103,7 +105,10 @@ where event_handler: None, network_globals: None, network_send: None, - http_listen_addr: None, + db_path: None, + freezer_db_path: None, + http_api_config: <_>::default(), + http_metrics_config: <_>::default(), websocket_listen_addr: None, eth_spec_instance, } @@ -280,55 +285,16 @@ where Ok(self) } - /// Immediately starts the beacon node REST API http server. - pub fn http_server( - mut self, - client_config: &ClientConfig, - eth2_config: &Eth2Config, - events: Arc>>, - ) -> Result { - let beacon_chain = self - .beacon_chain - .clone() - .ok_or_else(|| "http_server requires a beacon chain")?; - let context = self - .runtime_context - .as_ref() - .ok_or_else(|| "http_server requires a runtime_context")? - .service_context("http".into()); - let network_globals = self - .network_globals - .clone() - .ok_or_else(|| "http_server requires a libp2p network")?; - let network_send = self - .network_send - .clone() - .ok_or_else(|| "http_server requires a libp2p network sender")?; - - let network_info = rest_api::NetworkInfo { - network_globals, - network_chan: network_send, - }; - - let listening_addr = rest_api::start_server( - context.executor, - &client_config.rest_api, - beacon_chain, - network_info, - client_config - .create_db_path() - .map_err(|_| "unable to read data dir")?, - client_config - .create_freezer_db_path() - .map_err(|_| "unable to read freezer DB dir")?, - eth2_config.clone(), - events, - ) - .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?; - - self.http_listen_addr = Some(listening_addr); + /// Provides configuration for the HTTP API. + pub fn http_api_config(mut self, config: http_api::Config) -> Self { + self.http_api_config = config; + self + } - Ok(self) + /// Provides configuration for the HTTP server that serves Prometheus metrics. 
+ pub fn http_metrics_config(mut self, config: http_metrics::Config) -> Self { + self.http_metrics_config = config; + self } /// Immediately starts the service that periodically logs information each slot. @@ -367,25 +333,85 @@ where /// specified. /// /// If type inference errors are being raised, see the comment on the definition of `Self`. + #[allow(clippy::type_complexity)] pub fn build( self, - ) -> Client< - Witness< - TStoreMigrator, - TSlotClock, - TEth1Backend, - TEthSpec, - TEventHandler, - THotStore, - TColdStore, + ) -> Result< + Client< + Witness< + TStoreMigrator, + TSlotClock, + TEth1Backend, + TEthSpec, + TEventHandler, + THotStore, + TColdStore, + >, >, + String, > { - Client { + let runtime_context = self + .runtime_context + .as_ref() + .ok_or_else(|| "build requires a runtime context".to_string())?; + let log = runtime_context.log().clone(); + + let http_api_listen_addr = if self.http_api_config.enabled { + let ctx = Arc::new(http_api::Context { + config: self.http_api_config.clone(), + chain: self.beacon_chain.clone(), + network_tx: self.network_send.clone(), + network_globals: self.network_globals.clone(), + log: log.clone(), + }); + + let exit = runtime_context.executor.exit(); + + let (listen_addr, server) = http_api::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + + runtime_context + .clone() + .executor + .spawn_without_exit(async move { server.await }, "http-api"); + + Some(listen_addr) + } else { + info!(log, "HTTP server is disabled"); + None + }; + + let http_metrics_listen_addr = if self.http_metrics_config.enabled { + let ctx = Arc::new(http_metrics::Context { + config: self.http_metrics_config.clone(), + chain: self.beacon_chain.clone(), + db_path: self.db_path.clone(), + freezer_db_path: self.freezer_db_path.clone(), + log: log.clone(), + }); + + let exit = runtime_context.executor.exit(); + + let (listen_addr, server) = http_metrics::serve(ctx, exit) + .map_err(|e| format!("Unable to start 
HTTP API server: {:?}", e))?; + + runtime_context + .executor + .spawn_without_exit(async move { server.await }, "http-api"); + + Some(listen_addr) + } else { + debug!(log, "Metrics server is disabled"); + None + }; + + Ok(Client { beacon_chain: self.beacon_chain, network_globals: self.network_globals, - http_listen_addr: self.http_listen_addr, + http_api_listen_addr, + http_metrics_listen_addr, websocket_listen_addr: self.websocket_listen_addr, - } + }) } } @@ -520,6 +546,9 @@ where .clone() .ok_or_else(|| "disk_store requires a chain spec".to_string())?; + self.db_path = Some(hot_path.into()); + self.freezer_db_path = Some(cold_path.into()); + let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone()) .map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(Arc::new(store)); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 19088e785b5..a2975129f1e 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -63,10 +63,11 @@ pub struct Config { pub genesis: ClientGenesis, pub store: store::StoreConfig, pub network: network::NetworkConfig, - pub rest_api: rest_api::Config, pub chain: beacon_chain::ChainConfig, pub websocket_server: websocket_server::Config, pub eth1: eth1::Config, + pub http_api: http_api::Config, + pub http_metrics: http_metrics::Config, } impl Default for Config { @@ -80,7 +81,6 @@ impl Default for Config { store: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), - rest_api: <_>::default(), websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), dummy_eth1_backend: false, @@ -88,6 +88,8 @@ impl Default for Config { eth1: <_>::default(), disabled_forks: Vec::new(), graffiti: Graffiti::default(), + http_api: <_>::default(), + http_metrics: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index da670ff1344..6b721aee924 100644 --- 
a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,10 @@ pub use eth2_config::Eth2Config; pub struct Client { beacon_chain: Option>>, network_globals: Option>>, - http_listen_addr: Option, + /// Listen address for the standard eth2.0 API, if the service was started. + http_api_listen_addr: Option, + /// Listen address for the HTTP server which serves Prometheus metrics. + http_metrics_listen_addr: Option, websocket_listen_addr: Option, } @@ -33,9 +36,14 @@ impl Client { self.beacon_chain.clone() } - /// Returns the address of the client's HTTP API server, if it was started. - pub fn http_listen_addr(&self) -> Option { - self.http_listen_addr + /// Returns the address of the client's standard eth2.0 API server, if it was started. + pub fn http_api_listen_addr(&self) -> Option { + self.http_api_listen_addr + } + + /// Returns the address of the client's HTTP Prometheus metrics server, if it was started. + pub fn http_metrics_listen_addr(&self) -> Option { + self.http_metrics_listen_addr } /// Returns the address of the client's WebSocket API server, if it was started. 
diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 6dffdaa7c6d..e8f7d23a026 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -39,19 +39,34 @@ pub enum Eth1NetworkId { Custom(u64), } +impl Into for Eth1NetworkId { + fn into(self) -> u64 { + match self { + Eth1NetworkId::Mainnet => 1, + Eth1NetworkId::Goerli => 5, + Eth1NetworkId::Custom(id) => id, + } + } +} + +impl From for Eth1NetworkId { + fn from(id: u64) -> Self { + let into = |x: Eth1NetworkId| -> u64 { x.into() }; + match id { + id if id == into(Eth1NetworkId::Mainnet) => Eth1NetworkId::Mainnet, + id if id == into(Eth1NetworkId::Goerli) => Eth1NetworkId::Goerli, + id => Eth1NetworkId::Custom(id), + } + } +} + impl FromStr for Eth1NetworkId { type Err = String; fn from_str(s: &str) -> Result { - match s { - "1" => Ok(Eth1NetworkId::Mainnet), - "5" => Ok(Eth1NetworkId::Goerli), - custom => { - let network_id = u64::from_str_radix(custom, 10) - .map_err(|e| format!("Failed to parse eth1 network id {}", e))?; - Ok(Eth1NetworkId::Custom(network_id)) - } - } + u64::from_str_radix(s, 10) + .map(Into::into) + .map_err(|e| format!("Failed to parse eth1 network id {}", e)) } } diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index f5f018bd17b..a7aba85a28a 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -13,4 +13,6 @@ pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; pub use deposit_log::DepositLog; pub use inner::SszEth1Cache; -pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service}; +pub use service::{ + BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_NETWORK_ID, +}; diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/http_api/Cargo.toml similarity index 51% rename from beacon_node/rest_api/Cargo.toml rename to beacon_node/http_api/Cargo.toml index 38a5a1e7d55..828d26deb3d 100644 --- 
a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -1,50 +1,34 @@ [package] -name = "rest_api" -version = "0.2.0" -authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] +name = "http_api" +version = "0.1.0" +authors = ["Paul Hauner "] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + [dependencies] -bls = { path = "../../crypto/bls" } -rest_types = { path = "../../common/rest_types" } +warp = "0.2.5" +serde = { version = "1.0.110", features = ["derive"] } +tokio = { version = "0.2.21", features = ["sync"] } +parking_lot = "0.11.0" +types = { path = "../../consensus/types" } +hex = "0.4.2" beacon_chain = { path = "../beacon_chain" } +eth2 = { path = "../../common/eth2", features = ["lighthouse"] } +slog = "2.5.2" network = { path = "../network" } eth2_libp2p = { path = "../eth2_libp2p" } -store = { path = "../store" } -serde = { version = "1.0.110", features = ["derive"] } -serde_json = "1.0.52" -serde_yaml = "0.8.11" -slog = "2.5.2" -slog-term = "2.5.0" -slog-async = "2.5.0" -eth2_ssz = "0.1.2" -eth2_ssz_derive = "0.1.0" +eth1 = { path = "../eth1" } +fork_choice = { path = "../../consensus/fork_choice" } state_processing = { path = "../../consensus/state_processing" } -types = { path = "../../consensus/types" } -http = "0.2.1" -hyper = "0.13.5" -tokio = { version = "0.2.21", features = ["sync"] } -url = "2.1.1" -lazy_static = "1.4.0" -eth2_config = { path = "../../common/eth2_config" } +lighthouse_version = { path = "../../common/lighthouse_version" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +lazy_static = "1.4.0" +warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -hex = "0.4.2" -parking_lot = "0.11.0" -futures = "0.3.5" -operation_pool = { path = "../operation_pool" } -environment = { path = "../../lighthouse/environment" } -uhttp_sse = "0.5.1" -bus = "2.2.3" -itertools = "0.9.0" 
-lighthouse_version = { path = "../../common/lighthouse_version" } [dev-dependencies] -assert_matches = "1.3.0" -remote_beacon_node = { path = "../../common/remote_beacon_node" } -node_test_rig = { path = "../../testing/node_test_rig" } -tree_hash = "0.1.0" - -[features] -fake_crypto = [] +store = { path = "../store" } +environment = { path = "../../lighthouse/environment" } +tree_hash = { path = "../../consensus/tree_hash" } +discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] } diff --git a/beacon_node/http_api/src/beacon_proposer_cache.rs b/beacon_node/http_api/src/beacon_proposer_cache.rs new file mode 100644 index 00000000000..013c7dcc913 --- /dev/null +++ b/beacon_node/http_api/src/beacon_proposer_cache.rs @@ -0,0 +1,168 @@ +use crate::metrics; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::types::ProposerData; +use fork_choice::ProtoBlock; +use slot_clock::SlotClock; +use state_processing::per_slot_processing; +use types::{Epoch, EthSpec, Hash256, PublicKeyBytes}; + +/// This sets a maximum bound on the number of epochs to skip whilst instantiating the cache for +/// the first time. +const EPOCHS_TO_SKIP: u64 = 2; + +/// Caches the beacon block proposers for a given `epoch` and `epoch_boundary_root`. +/// +/// This cache is only able to contain a single set of proposers and is only +/// intended to cache the proposers for the current epoch according to the head +/// of the chain. A change in epoch or re-org to a different chain may cause a +/// cache miss and rebuild. +pub struct BeaconProposerCache { + epoch: Epoch, + epoch_boundary_root: Hash256, + proposers: Vec, +} + +impl BeaconProposerCache { + /// Create a new cache for the current epoch of the `chain`. + pub fn new(chain: &BeaconChain) -> Result { + let (head_root, head_block) = Self::current_head_block(chain)?; + + // If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at + // the epoch of the head. 
This prevents doing a massive amount of skip slots when starting + // a new database from genesis. + let epoch = { + let epoch_now = chain + .epoch() + .unwrap_or_else(|_| chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch())); + let head_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch()); + if epoch_now > head_epoch + EPOCHS_TO_SKIP { + head_epoch + } else { + epoch_now + } + }; + + Self::for_head_block(chain, epoch, head_root, head_block) + } + + /// Create a new cache that contains the shuffling for `current_epoch`, + /// assuming that `head_root` and `head_block` represents the most recent + /// canonical block. + fn for_head_block( + chain: &BeaconChain, + current_epoch: Epoch, + head_root: Hash256, + head_block: ProtoBlock, + ) -> Result { + let _timer = metrics::start_timer(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_TIMES); + + let mut head_state = chain + .get_state(&head_block.state_root, Some(head_block.slot))? + .ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?; + + // We *must* skip forward to the current epoch to obtain valid proposer + // duties. We cannot skip to the previous epoch, like we do with + // attester duties. + while head_state.current_epoch() < current_epoch { + // Skip slots until the current epoch, providing `Hash256::zero()` as the state root + // since we don't require it to be valid to identify producers. + per_slot_processing(&mut head_state, Some(Hash256::zero()), &chain.spec)?; + } + + let proposers = current_epoch + .slot_iter(T::EthSpec::slots_per_epoch()) + .map(|slot| { + head_state + .get_beacon_proposer_index(slot, &chain.spec) + .map_err(BeaconChainError::from) + .and_then(|i| { + let pubkey = chain + .validator_pubkey(i)? 
+ .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?; + + Ok(ProposerData { + pubkey: PublicKeyBytes::from(pubkey), + slot, + }) + }) + }) + .collect::>()?; + + let epoch_boundary_slot = head_state + .current_epoch() + .start_slot(T::EthSpec::slots_per_epoch()); + let epoch_boundary_root = if head_state.slot >= epoch_boundary_slot { + head_root + } else { + *head_state.get_block_root(epoch_boundary_slot)? + }; + + Ok(Self { + epoch: current_epoch, + epoch_boundary_root, + proposers, + }) + } + + /// Return the proposers for the given `Epoch`. + /// + /// The cache may be rebuilt if: + /// + /// - The epoch has changed since the last cache build. + /// - There has been a re-org that crosses an epoch boundary. + pub fn get_proposers( + &mut self, + chain: &BeaconChain, + epoch: Epoch, + ) -> Result, warp::Rejection> { + let is_prior_to_genesis = chain.slot_clock.is_prior_to_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error("unable to read slot clock".to_string()) + })?; + let current_epoch = if is_prior_to_genesis { + chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()) + } else { + chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)? + }; + + // Disallow requests that are outside the current epoch. This ensures the cache doesn't get + // washed-out with old values. + if current_epoch != epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "requested epoch is {} but only current epoch {} is allowed", + epoch, current_epoch + ))); + } + + let (head_root, head_block) = + Self::current_head_block(chain).map_err(warp_utils::reject::beacon_chain_error)?; + let epoch_boundary_root = head_block.target_root; + + // Rebuild the cache if this call causes a cache-miss. 
+ if self.epoch != current_epoch || self.epoch_boundary_root != epoch_boundary_root { + metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL); + + *self = Self::for_head_block(chain, current_epoch, head_root, head_block) + .map_err(warp_utils::reject::beacon_chain_error)?; + } else { + metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL); + } + + Ok(self.proposers.clone()) + } + + /// Use fork choice to obtain some information about the head block of `chain`. + fn current_head_block( + chain: &BeaconChain, + ) -> Result<(Hash256, ProtoBlock), BeaconChainError> { + let head_root = chain.head_beacon_block_root()?; + + chain + .fork_choice + .read() + .get_block(&head_root) + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root)) + .map(|head_block| (head_root, head_block)) + } +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs new file mode 100644 index 00000000000..5e358a2d683 --- /dev/null +++ b/beacon_node/http_api/src/block_id.rs @@ -0,0 +1,87 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::BlockId as CoreBlockId; +use std::str::FromStr; +use types::{Hash256, SignedBeaconBlock, Slot}; + +/// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given +/// `BlockId`. +#[derive(Debug)] +pub struct BlockId(pub CoreBlockId); + +impl BlockId { + pub fn from_slot(slot: Slot) -> Self { + Self(CoreBlockId::Slot(slot)) + } + + pub fn from_root(root: Hash256) -> Self { + Self(CoreBlockId::Root(root)) + } + + /// Return the block root identified by `self`. 
+ pub fn root( + &self, + chain: &BeaconChain, + ) -> Result { + match &self.0 { + CoreBlockId::Head => chain + .head_info() + .map(|head| head.block_root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Genesis => Ok(chain.genesis_block_root), + CoreBlockId::Finalized => chain + .head_info() + .map(|head| head.finalized_checkpoint.root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Justified => chain + .head_info() + .map(|head| head.current_justified_checkpoint.root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Slot(slot) => chain + .block_root_at_slot(*slot) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block at slot {}", + slot + )) + }) + }), + CoreBlockId::Root(root) => Ok(*root), + } + } + + /// Return the `SignedBeaconBlock` identified by `self`. + pub fn block( + &self, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + match &self.0 { + CoreBlockId::Head => chain + .head_beacon_block() + .map_err(warp_utils::reject::beacon_chain_error), + _ => { + let root = self.root(chain)?; + chain + .get_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + }) + } + } + } +} + +impl FromStr for BlockId { + type Err = String; + + fn from_str(s: &str) -> Result { + CoreBlockId::from_str(s).map(Self) + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs new file mode 100644 index 00000000000..8db4da58639 --- /dev/null +++ b/beacon_node/http_api/src/lib.rs @@ -0,0 +1,1688 @@ +//! This crate contains a HTTP server which serves the endpoints listed here: +//! +//! https://github.com/ethereum/eth2.0-APIs +//! +//! 
There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are +//! used for development. + +mod beacon_proposer_cache; +mod block_id; +mod metrics; +mod state_id; +mod validator_inclusion; + +use beacon_chain::{ + observed_operations::ObservationOutcome, AttestationError as AttnError, BeaconChain, + BeaconChainError, BeaconChainTypes, +}; +use beacon_proposer_cache::BeaconProposerCache; +use block_id::BlockId; +use eth2::{ + types::{self as api_types, ValidatorId}, + StatusCode, +}; +use eth2_libp2p::{NetworkGlobals, PubsubMessage}; +use lighthouse_version::version_with_platform; +use network::NetworkMessage; +use parking_lot::Mutex; +use serde::{Deserialize, Serialize}; +use slog::{crit, error, info, trace, warn, Logger}; +use state_id::StateId; +use state_processing::per_slot_processing; +use std::borrow::Cow; +use std::convert::TryInto; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use types::{ + Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec, + Hash256, ProposerSlashing, PublicKey, RelativeEpoch, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, +}; +use warp::Filter; + +const API_PREFIX: &str = "eth"; +const API_VERSION: &str = "v1"; + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. +pub struct Context { + pub config: Config, + pub chain: Option>>, + pub network_tx: Option>>, + pub network_globals: Option>>, + pub log: Logger, +} + +/// Configuration for the HTTP server. 
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + pub listen_addr: Ipv4Addr, + pub listen_port: u16, + pub allow_origin: Option, +} + +impl Default for Config { + fn default() -> Self { + Self { + enabled: false, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 5052, + allow_origin: None, + } + } +} + +#[derive(Debug)] +pub enum Error { + Warp(warp::Error), + Other(String), +} + +impl From for Error { + fn from(e: warp::Error) -> Self { + Error::Warp(e) + } +} + +impl From for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} + +/// Creates a `warp` logging wrapper which we use to create `slog` logs. +pub fn slog_logging( + log: Logger, +) -> warp::filters::log::Log { + warp::log::custom(move |info| { + match info.status() { + status if status == StatusCode::OK || status == StatusCode::NOT_FOUND => { + trace!( + log, + "Processed HTTP API request"; + "elapsed" => format!("{:?}", info.elapsed()), + "status" => status.to_string(), + "path" => info.path(), + "method" => info.method().to_string(), + ); + } + status => { + warn!( + log, + "Error processing HTTP API request"; + "elapsed" => format!("{:?}", info.elapsed()), + "status" => status.to_string(), + "path" => info.path(), + "method" => info.method().to_string(), + ); + } + }; + }) +} + +/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging, +/// per say). +pub fn prometheus_metrics() -> warp::filters::log::Log { + warp::log::custom(move |info| { + // Here we restrict the `info.path()` value to some predefined values. Without this, we end + // up with a new metric type each time someone includes something unique in the path (e.g., + // a block hash). 
+ let path = { + let equals = |s: &'static str| -> Option<&'static str> { + if info.path() == format!("/{}/{}/{}", API_PREFIX, API_VERSION, s) { + Some(s) + } else { + None + } + }; + + let starts_with = |s: &'static str| -> Option<&'static str> { + if info + .path() + .starts_with(&format!("/{}/{}/{}", API_PREFIX, API_VERSION, s)) + { + Some(s) + } else { + None + } + }; + + equals("beacon/blocks") + .or_else(|| starts_with("validator/duties/attester")) + .or_else(|| starts_with("validator/duties/proposer")) + .or_else(|| starts_with("validator/attestation_data")) + .or_else(|| starts_with("validator/blocks")) + .or_else(|| starts_with("validator/aggregate_attestation")) + .or_else(|| starts_with("validator/aggregate_and_proofs")) + .or_else(|| starts_with("validator/beacon_committee_subscriptions")) + .or_else(|| starts_with("beacon/")) + .or_else(|| starts_with("config/")) + .or_else(|| starts_with("debug/")) + .or_else(|| starts_with("events/")) + .or_else(|| starts_with("node/")) + .or_else(|| starts_with("validator/")) + .unwrap_or("other") + }; + + metrics::inc_counter_vec(&metrics::HTTP_API_PATHS_TOTAL, &[path]); + metrics::inc_counter_vec( + &metrics::HTTP_API_STATUS_CODES_TOTAL, + &[&info.status().to_string()], + ); + metrics::observe_timer_vec(&metrics::HTTP_API_PATHS_TIMES, &[path], info.elapsed()); + }) +} + +/// Creates a server that will serve requests using information from `ctx`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the provided address and then return a tuple of: +/// +/// - `SocketAddr`: the address that the HTTP server will listen on. +/// - `Future`: the actual server future that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
+pub fn serve( + ctx: Arc>, + shutdown: impl Future + Send + Sync + 'static, +) -> Result<(SocketAddr, impl Future), Error> { + let config = ctx.config.clone(); + let log = ctx.log.clone(); + let allow_origin = config.allow_origin.clone(); + + // Sanity check. + if !config.enabled { + crit!(log, "Cannot start disabled HTTP server"); + return Err(Error::Other( + "A disabled server should not be started".to_string(), + )); + } + + let eth1_v1 = warp::path(API_PREFIX).and(warp::path(API_VERSION)); + + // Instantiate the beacon proposer cache. + let beacon_proposer_cache = ctx + .chain + .as_ref() + .map(|chain| BeaconProposerCache::new(&chain)) + .transpose() + .map_err(|e| format!("Unable to initialize beacon proposer cache: {:?}", e))? + .map(Mutex::new) + .map(Arc::new); + + // Create a `warp` filter that provides access to the proposer cache. + let beacon_proposer_cache = || { + warp::any() + .map(move || beacon_proposer_cache.clone()) + .and_then(|beacon_proposer_cache| async move { + match beacon_proposer_cache { + Some(cache) => Ok(cache), + None => Err(warp_utils::reject::custom_not_found( + "Beacon proposer cache is not initialized.".to_string(), + )), + } + }) + }; + + // Create a `warp` filter that provides access to the network globals. + let inner_network_globals = ctx.network_globals.clone(); + let network_globals = warp::any() + .map(move || inner_network_globals.clone()) + .and_then(|network_globals| async move { + match network_globals { + Some(globals) => Ok(globals), + None => Err(warp_utils::reject::custom_not_found( + "network globals are not initialized.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the beacon chain. 
+ let inner_ctx = ctx.clone(); + let chain_filter = + warp::any() + .map(move || inner_ctx.chain.clone()) + .and_then(|chain| async move { + match chain { + Some(chain) => Ok(chain), + None => Err(warp_utils::reject::custom_not_found( + "Beacon chain genesis has not yet been observed.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the network sender channel. + let inner_ctx = ctx.clone(); + let network_tx_filter = warp::any() + .map(move || inner_ctx.network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the logger. + let log_filter = warp::any().map(move || ctx.log.clone()); + + /* + * + * Start of HTTP method definitions. + * + */ + + // GET beacon/genesis + let get_beacon_genesis = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("genesis")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + chain + .head_info() + .map_err(warp_utils::reject::beacon_chain_error) + .map(|head| api_types::GenesisData { + genesis_time: head.genesis_time, + genesis_validators_root: head.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }) + .map(api_types::GenericResponse::from) + }) + }); + + /* + * beacon/states/{state_id} + */ + + let beacon_states_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::()) + .and(chain_filter.clone()); + + // GET beacon/states/{state_id}/root + let get_beacon_state_root = beacon_states_path + .clone() + .and(warp::path("root")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || { + state_id + .root(&chain) + .map(api_types::RootData::from) + 
.map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/fork + let get_beacon_state_fork = beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from)) + }); + + // GET beacon/states/{state_id}/finality_checkpoints + let get_beacon_state_finality_checkpoints = beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + Ok(api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint, + current_justified: state.current_justified_checkpoint, + finalized: state.finalized_checkpoint, + }) + }) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/validators + let get_beacon_state_validators = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = chain.spec.far_future_epoch; + + Ok(state + .validators + .iter() + .zip(state.balances.iter()) + .enumerate() + .map(|(index, (validator, balance))| api_types::ValidatorData { + index: index as u64, + balance: *balance, + status: api_types::ValidatorStatus::from_validator( + Some(validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + .collect::>()) + }) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/validators/{validator_id} + let get_beacon_state_validators_id = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::param::()) + 
.and(warp::path::end()) + .and_then( + |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => { + state.validators.iter().position(|v| v.pubkey == *pubkey) + } + ValidatorId::Index(index) => Some(*index as usize), + }; + + index_opt + .and_then(|index| { + let validator = state.validators.get(index)?; + let balance = *state.balances.get(index)?; + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = chain.spec.far_future_epoch; + + Some(api_types::ValidatorData { + index: index as u64, + balance, + status: api_types::ValidatorStatus::from_validator( + Some(validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(warp::reject::not_found) + }) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET beacon/states/{state_id}/committees/{epoch} + let get_beacon_state_committees = beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::path::param::()) + .and(warp::query::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, + chain: Arc>, + epoch: Epoch, + query: api_types::CommitteesQuery| { + blocking_json_task(move || { + state_id.map_state(&chain, |state| { + let relative_epoch = + RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( + |_| { + warp_utils::reject::custom_bad_request(format!( + "state is epoch {} and only previous, current and next epochs are supported", + state.current_epoch() + )) + }, + )?; + + let committee_cache = if state + .committee_cache_is_initialized(relative_epoch) + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } else { + CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned) + } + .map_err(BeaconChainError::BeaconStateError) + 
.map_err(warp_utils::reject::beacon_chain_error)?; + + // Use either the supplied slot or all slots in the epoch. + let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "{} is not in epoch {}", + slot, epoch + ))); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(api_types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok(api_types::GenericResponse::from(response)) + }) + }) + }, + ); + + // GET beacon/headers + // + // Note: this endpoint only returns information about blocks in the canonical chain. Given that + // there's a `canonical` flag on the response, I assume it should also return non-canonical + // things. Returning non-canonical things is hard for us since we don't already have a + // mechanism for arbitrary forwards block iteration, we only support iterating forwards along + // the canonical chain. 
+ let get_beacon_headers = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::HeadersQuery, chain: Arc>| { + blocking_json_task(move || { + let (root, block) = match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => chain + .head_beacon_block() + .map_err(warp_utils::reject::beacon_chain_error) + .map(|block| (block.canonical_root(), block))?, + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let parent = BlockId::from_root(parent_root).block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; + + BlockId::from_root(root) + .block(&chain) + .map(|block| (root, block))? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let root = BlockId::from_slot(slot).root(&chain)?; + let block = BlockId::from_root(root).block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. 
+ if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } + + (root, block) + } + }; + + let data = api_types::BlockHeaderData { + root, + canonical: true, + header: api_types::BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + + Ok(api_types::GenericResponse::from(vec![data])) + }) + }, + ); + + // GET beacon/headers/{block_id} + let get_beacon_headers_block_id = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|block_id: BlockId, chain: Arc>| { + blocking_json_task(move || { + let root = block_id.root(&chain)?; + let block = BlockId::from_root(root).block(&chain)?; + + let canonical = chain + .block_root_at_slot(block.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + .map_or(false, |canonical| root == canonical); + + let data = api_types::BlockHeaderData { + root, + canonical, + header: api_types::BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + + Ok(api_types::GenericResponse::from(data)) + }) + }); + + /* + * beacon/blocks + */ + + // POST beacon/blocks/{block_id} + let post_beacon_blocks = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block: SignedBeaconBlock, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + blocking_json_task(move || { + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. 
+ publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(Box::new(block.clone())), + )?; + + match chain.process_block(block.clone()) { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "root" => format!("{}", root) + ); + + // Update the head since it's likely this block will become the new + // head. + chain + .fork_choice() + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } + }) + }, + ); + + let beacon_blocks_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::param::()) + .and(chain_filter.clone()); + + // GET beacon/blocks/{block_id} + let get_beacon_block = beacon_blocks_path.clone().and(warp::path::end()).and_then( + |block_id: BlockId, chain: Arc>| { + blocking_json_task(move || block_id.block(&chain).map(api_types::GenericResponse::from)) + }, + ); + + // GET beacon/blocks/{block_id}/root + let get_beacon_block_root = beacon_blocks_path + .clone() + .and(warp::path("root")) + .and(warp::path::end()) + .and_then(|block_id: BlockId, chain: Arc>| { + blocking_json_task(move || { + block_id + .root(&chain) + .map(api_types::RootData::from) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/blocks/{block_id}/attestations + let get_beacon_block_attestations = beacon_blocks_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and_then(|block_id: BlockId, chain: Arc>| { + blocking_json_task(move || { + block_id + .block(&chain) + .map(|block| block.message.body.attestations) + .map(api_types::GenericResponse::from) + }) + }); + + /* + * beacon/pool + */ + + let beacon_pool_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(chain_filter.clone()); + + // POST beacon/pool/attestations + let post_beacon_pool_attestations = 
beacon_pool_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + attestation: Attestation, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let attestation = chain + .verify_unaggregated_attestation_for_gossip(attestation.clone(), None) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + publish_pubsub_message( + &network_tx, + PubsubMessage::Attestation(Box::new(( + attestation.subnet_id(), + attestation.attestation().clone(), + ))), + )?; + + chain + .apply_attestation_to_fork_choice(&attestation) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to fork choice: {:?}", + e + )) + })?; + + chain + .add_to_naive_aggregation_pool(attestation) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to naive aggregation pool: {:?}", + e + )) + })?; + + Ok(()) + }) + }, + ); + + // GET beacon/pool/attestations + let get_beacon_pool_attestations = beacon_pool_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let mut attestations = chain.op_pool.get_all_attestations(); + attestations.extend(chain.naive_aggregation_pool.read().iter().cloned()); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/attester_slashings + let post_beacon_pool_attester_slashings = beacon_pool_path + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + slashing: AttesterSlashing, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_attester_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip 
verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(slashing) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::AttesterSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain + .import_attester_slashing(slashing) + .map_err(warp_utils::reject::beacon_chain_error)?; + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/attester_slashings + let get_beacon_pool_attester_slashings = beacon_pool_path + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_attester_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/proposer_slashings + let post_beacon_pool_proposer_slashings = beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + slashing: ProposerSlashing, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_proposer_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(slashing) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::ProposerSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_proposer_slashing(slashing); + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/proposer_slashings + let get_beacon_pool_proposer_slashings = beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/voluntary_exits + let 
post_beacon_pool_voluntary_exits = beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + exit: SignedVoluntaryExit, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_voluntary_exit_for_gossip(exit.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(exit) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), + )?; + + chain.import_voluntary_exit(exit); + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/voluntary_exits + let get_beacon_pool_voluntary_exits = beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + /* + * config/fork_schedule + */ + + let config_path = eth1_v1.and(warp::path("config")); + + // GET config/fork_schedule + let get_config_fork_schedule = config_path + .clone() + .and(warp::path("fork_schedule")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + StateId::head() + .fork(&chain) + .map(|fork| api_types::GenericResponse::from(vec![fork])) + }) + }); + + // GET config/spec + let get_config_spec = config_path + .clone() + .and(warp::path("spec")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(YamlConfig::from_spec::< + T::EthSpec, + >( + &chain.spec + ))) + }) + }); + + // GET config/deposit_contract + let get_config_deposit_contract = config_path + .clone() + .and(warp::path("deposit_contract")) + 
.and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + api_types::DepositContractData { + address: chain.spec.deposit_contract_address, + chain_id: eth1::DEFAULT_NETWORK_ID.into(), + }, + )) + }) + }); + + /* + * debug + */ + + // GET debug/beacon/states/{state_id} + let get_debug_beacon_states = eth1_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_task(move || { + state_id.map_state(&chain, |state| { + Ok(warp::reply::json(&api_types::GenericResponseRef::from( + &state, + ))) + }) + }) + }); + + // GET debug/beacon/heads + let get_debug_beacon_heads = eth1_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("heads")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| api_types::ChainHeadData { root, slot }) + .collect::>(); + Ok(api_types::GenericResponse::from(heads)) + }) + }); + + /* + * node + */ + + // GET node/identity + let get_node_identity = eth1_v1 + .and(warp::path("node")) + .and(warp::path("identity")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(api_types::IdentityData { + peer_id: network_globals.local_peer_id().to_base58(), + enr: network_globals.local_enr(), + p2p_addresses: network_globals.listen_multiaddrs(), + })) + }) + }); + + // GET node/version + let get_node_version = eth1_v1 + .and(warp::path("node")) + .and(warp::path("version")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(api_types::VersionData { + version: 
version_with_platform(), + })) + }) + }); + + // GET node/syncing + let get_node_syncing = eth1_v1 + .and(warp::path("node")) + .and(warp::path("syncing")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc>, chain: Arc>| { + blocking_json_task(move || { + let head_slot = chain + .head_info() + .map(|info| info.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Taking advantage of saturating subtraction on slot. + let sync_distance = current_slot - head_slot; + + let syncing_data = api_types::SyncingData { + is_syncing: network_globals.sync_state.read().is_syncing(), + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) + }) + }, + ); + + /* + * validator + */ + + // GET validator/duties/attester/{epoch} + let get_validator_duties_attester = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("attester")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, query: api_types::ValidatorDutiesQuery, chain: Arc>| { + blocking_json_task(move || { + let current_epoch = chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)?; + + if epoch > current_epoch + 1 { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch past the current epoch {}", + epoch, current_epoch + ))); + } + + let validator_count = StateId::head() + .map_state(&chain, |state| Ok(state.validators.len() as u64))?; + + let indices = query + .index + .as_ref() + .map(|index| index.0.clone()) + .map(Result::Ok) + .unwrap_or_else(|| { + Ok::<_, warp::Rejection>((0..validator_count).collect()) + })?; + + let pubkeys = indices + .into_iter() + .filter(|i| *i < validator_count as u64) + .map(|i| { + let pubkey = chain + 
.validator_pubkey(i as usize) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "unknown validator index {}", + i + )) + })?; + + Ok((i, pubkey)) + }) + .collect::, warp::Rejection>>()?; + + // Converts the internal Lighthouse `AttestationDuty` struct into an + // API-conforming `AttesterData` struct. + let convert = |validator_index: u64, + pubkey: PublicKey, + duty: AttestationDuty| + -> api_types::AttesterData { + api_types::AttesterData { + pubkey: pubkey.into(), + validator_index, + committees_at_slot: duty.committees_at_slot, + committee_index: duty.index, + committee_length: duty.committee_len as u64, + validator_committee_index: duty.committee_position as u64, + slot: duty.slot, + } + }; + + // Here we have two paths: + // + // ## Fast + // + // If the request epoch is the current epoch, use the cached beacon chain + // method. + // + // ## Slow + // + // If the request epoch is prior to the current epoch, load a beacon state from + // disk + // + // The idea is to stop historical requests from washing out the cache on the + // beacon chain, whilst allowing a VC to request duties quickly. + let duties = if epoch == current_epoch { + // Fast path. + pubkeys + .into_iter() + // Exclude indices which do not represent a known public key and a + // validator duty. + .filter_map(|(i, pubkey)| { + Some( + chain + .validator_attestation_duty(i as usize, epoch) + .transpose()? + .map_err(warp_utils::reject::beacon_chain_error) + .map(|duty| convert(i, pubkey, duty)), + ) + }) + .collect::, warp::Rejection>>()? + } else { + // If the head state is equal to or earlier than the request epoch, use it. + let mut state = chain + .with_head(|head| { + if head.beacon_state.current_epoch() <= epoch { + Ok(Some( + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + )) + } else { + Ok(None) + } + }) + .map_err(warp_utils::reject::beacon_chain_error)? 
+ .map(Result::Ok) + .unwrap_or_else(|| { + StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(&chain) + })?; + + // Only skip forward to the epoch prior to the request, since we have a + // one-epoch look-ahead on shuffling. + while state.next_epoch() < epoch { + // Don't calculate state roots since they aren't required for calculating + // shuffling (achieved by providing Hash256::zero()). + per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) + .map_err(warp_utils::reject::slot_processing_error)?; + } + + let relative_epoch = + RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( + |e| { + warp_utils::reject::custom_server_error(format!( + "unable to obtain suitable state: {:?}", + e + )) + }, + )?; + + state + .build_committee_cache(relative_epoch, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + pubkeys + .into_iter() + .filter_map(|(i, pubkey)| { + Some( + state + .get_attestation_duties(i as usize, relative_epoch) + .transpose()? + .map_err(warp_utils::reject::beacon_state_error) + .map(|duty| convert(i, pubkey, duty)), + ) + }) + .collect::, warp::Rejection>>()? 
+ }; + + Ok(api_types::GenericResponse::from(duties)) + }) + }, + ); + + // GET validator/duties/proposer/{epoch} + let get_validator_duties_proposer = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("proposer")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(beacon_proposer_cache()) + .and_then( + |epoch: Epoch, + chain: Arc>, + beacon_proposer_cache: Arc>| { + blocking_json_task(move || { + beacon_proposer_cache + .lock() + .get_proposers(&chain, epoch) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET validator/blocks/{slot} + let get_validator_blocks = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("blocks")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc>| { + blocking_json_task(move || { + let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not valid BLS signature: {:?}", + e + )) + })?; + + chain + .produce_block(randao_reveal, slot, query.graffiti.map(Into::into)) + .map(|block_and_state| block_and_state.0) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::block_production_error) + }) + }, + ); + + // GET validator/attestation_data?slot,committee_index + let get_validator_attestation_data = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("attestation_data")) + .and(warp::path::end()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { + blocking_json_task(move || { + chain + .produce_unaggregated_attestation(query.slot, query.committee_index) + .map(|attestation| attestation.data) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::beacon_chain_error) + }) + }, + ); + + // GET 
validator/aggregate_attestation?attestation_data_root,slot + let get_validator_aggregate_attestation = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("aggregate_attestation")) + .and(warp::path::end()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { + blocking_json_task(move || { + chain + .get_aggregated_attestation_by_slot_and_root( + query.slot, + &query.attestation_data_root, + ) + .map(api_types::GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching aggregate found".to_string(), + ) + }) + }) + }, + ); + + // POST validator/aggregate_and_proofs + let post_validator_aggregate_and_proofs = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("aggregate_and_proofs")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + aggregate: SignedAggregateAndProof, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let aggregate = + match chain.verify_aggregated_attestation_for_gossip(aggregate.clone()) { + Ok(aggregate) => aggregate, + // If we already know the attestation, don't broadcast it or attempt to + // further verify it. Return success. + // + // It's reasonably likely that two different validators produce + // identical aggregates, especially if they're using the same beacon + // node. 
+ Err(AttnError::AttestationAlreadyKnown(_)) => return Ok(()), + Err(e) => { + return Err(warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + ))) + } + }; + + publish_pubsub_message( + &network_tx, + PubsubMessage::AggregateAndProofAttestation(Box::new( + aggregate.aggregate().clone(), + )), + )?; + + chain + .apply_attestation_to_fork_choice(&aggregate) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to fork choice: {:?}", + e + )) + })?; + + chain.add_to_block_inclusion_pool(aggregate).map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to block inclusion pool: {:?}", + e + )) + })?; + + Ok(()) + }) + }, + ); + + // POST validator/beacon_committee_subscriptions + let post_validator_beacon_committee_subscriptions = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("beacon_committee_subscriptions")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter) + .and_then( + |subscriptions: Vec, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + for subscription in &subscriptions { + let subscription = api_types::ValidatorSubscription { + validator_index: subscription.validator_index, + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + }; + + publish_network_message( + &network_tx, + NetworkMessage::Subscribe { + subscriptions: vec![subscription], + }, + )?; + } + + Ok(()) + }) + }, + ); + + // GET lighthouse/health + let get_lighthouse_health = warp::path("lighthouse") + .and(warp::path("health")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + eth2::lighthouse::Health::observe() + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }); + + // GET lighthouse/syncing + let get_lighthouse_syncing = 
warp::path("lighthouse") + .and(warp::path("syncing")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + network_globals.sync_state(), + )) + }) + }); + + // GET lighthouse/peers + let get_lighthouse_peers = warp::path("lighthouse") + .and(warp::path("peers")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc>| { + blocking_json_task(move || { + Ok(network_globals + .peers + .read() + .peers() + .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect::>()) + }) + }); + + // GET lighthouse/peers/connected + let get_lighthouse_peers_connected = warp::path("lighthouse") + .and(warp::path("peers")) + .and(warp::path("connected")) + .and(warp::path::end()) + .and(network_globals) + .and_then(|network_globals: Arc>| { + blocking_json_task(move || { + Ok(network_globals + .peers + .read() + .connected_peers() + .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect::>()) + }) + }); + + // GET lighthouse/proto_array + let get_lighthouse_proto_array = warp::path("lighthouse") + .and(warp::path("proto_array")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_task(move || { + Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( + chain.fork_choice.read().proto_array().core_proto_array(), + ))) + }) + }); + + // GET lighthouse/validator_inclusion/{epoch}/{validator_id} + let get_lighthouse_validator_inclusion_global = warp::path("lighthouse") + .and(warp::path("validator_inclusion")) + .and(warp::path::param::()) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, validator_id: ValidatorId, chain: Arc>| { + blocking_json_task(move || 
{ + validator_inclusion::validator_inclusion_data(epoch, &validator_id, &chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET lighthouse/validator_inclusion/{epoch}/global + let get_lighthouse_validator_inclusion = warp::path("lighthouse") + .and(warp::path("validator_inclusion")) + .and(warp::path::param::()) + .and(warp::path("global")) + .and(warp::path::end()) + .and(chain_filter) + .and_then(|epoch: Epoch, chain: Arc>| { + blocking_json_task(move || { + validator_inclusion::global_validator_inclusion_data(epoch, &chain) + .map(api_types::GenericResponse::from) + }) + }); + + // Define the ultimate set of routes that will be provided to the server. + let routes = warp::get() + .and( + get_beacon_genesis + .or(get_beacon_state_root.boxed()) + .or(get_beacon_state_fork.boxed()) + .or(get_beacon_state_finality_checkpoints.boxed()) + .or(get_beacon_state_validators.boxed()) + .or(get_beacon_state_validators_id.boxed()) + .or(get_beacon_state_committees.boxed()) + .or(get_beacon_headers.boxed()) + .or(get_beacon_headers_block_id.boxed()) + .or(get_beacon_block.boxed()) + .or(get_beacon_block_attestations.boxed()) + .or(get_beacon_block_root.boxed()) + .or(get_beacon_pool_attestations.boxed()) + .or(get_beacon_pool_attester_slashings.boxed()) + .or(get_beacon_pool_proposer_slashings.boxed()) + .or(get_beacon_pool_voluntary_exits.boxed()) + .or(get_config_fork_schedule.boxed()) + .or(get_config_spec.boxed()) + .or(get_config_deposit_contract.boxed()) + .or(get_debug_beacon_states.boxed()) + .or(get_debug_beacon_heads.boxed()) + .or(get_node_identity.boxed()) + .or(get_node_version.boxed()) + .or(get_node_syncing.boxed()) + .or(get_validator_duties_attester.boxed()) + .or(get_validator_duties_proposer.boxed()) + .or(get_validator_blocks.boxed()) + .or(get_validator_attestation_data.boxed()) + .or(get_validator_aggregate_attestation.boxed()) + .or(get_lighthouse_health.boxed()) + .or(get_lighthouse_syncing.boxed()) + 
.or(get_lighthouse_peers.boxed()) + .or(get_lighthouse_peers_connected.boxed()) + .or(get_lighthouse_proto_array.boxed()) + .or(get_lighthouse_validator_inclusion_global.boxed()) + .or(get_lighthouse_validator_inclusion.boxed()) + .boxed(), + ) + .or(warp::post() + .and( + post_beacon_blocks + .or(post_beacon_pool_attestations.boxed()) + .or(post_beacon_pool_attester_slashings.boxed()) + .or(post_beacon_pool_proposer_slashings.boxed()) + .or(post_beacon_pool_voluntary_exits.boxed()) + .or(post_validator_aggregate_and_proofs.boxed()) + .or(post_validator_beacon_committee_subscriptions.boxed()) + .boxed(), + ) + .boxed()) + .boxed() + // Maps errors into HTTP responses. + .recover(warp_utils::reject::handle_rejection) + .with(slog_logging(log.clone())) + .with(prometheus_metrics()) + // Add a `Server` header. + .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) + // Maybe add some CORS headers. + .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref())); + + let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )?; + + info!( + log, + "HTTP API started"; + "listen_address" => listening_socket.to_string(), + ); + + Ok((listening_socket, server)) +} + +/// Publish a message to the libp2p pubsub network. +fn publish_pubsub_message( + network_tx: &UnboundedSender>, + message: PubsubMessage, +) -> Result<(), warp::Rejection> { + publish_network_message( + network_tx, + NetworkMessage::Publish { + messages: vec![message], + }, + ) +} + +/// Publish a message to the libp2p network. 
+fn publish_network_message( + network_tx: &UnboundedSender>, + message: NetworkMessage, +) -> Result<(), warp::Rejection> { + network_tx.send(message).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to publish to network channel: {}", + e + )) + }) +} + +/// Execute some task in a tokio "blocking thread". These threads are ideal for long-running +/// (blocking) tasks since they don't jam up the core executor. +async fn blocking_task(func: F) -> T +where + F: Fn() -> T, +{ + tokio::task::block_in_place(func) +} + +/// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. +async fn blocking_json_task(func: F) -> Result +where + F: Fn() -> Result, + T: Serialize, +{ + blocking_task(func) + .await + .map(|resp| warp::reply::json(&resp)) +} diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs new file mode 100644 index 00000000000..c641df6a4a5 --- /dev/null +++ b/beacon_node/http_api/src/metrics.rs @@ -0,0 +1,32 @@ +pub use lighthouse_metrics::*; + +lazy_static::lazy_static! 
{ + pub static ref HTTP_API_PATHS_TOTAL: Result = try_create_int_counter_vec( + "http_api_paths_total", + "Count of HTTP requests received", + &["path"] + ); + pub static ref HTTP_API_STATUS_CODES_TOTAL: Result = try_create_int_counter_vec( + "http_api_status_codes_total", + "Count of HTTP status codes returned", + &["status"] + ); + pub static ref HTTP_API_PATHS_TIMES: Result = try_create_histogram_vec( + "http_api_paths_times", + "Duration to process HTTP requests per path", + &["path"] + ); + + pub static ref HTTP_API_BEACON_PROPOSER_CACHE_TIMES: Result = try_create_histogram( + "http_api_beacon_proposer_cache_build_times", + "Duration to process HTTP requests per path", + ); + pub static ref HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL: Result = try_create_int_counter( + "http_api_beacon_proposer_cache_hits_total", + "Count of times the proposer cache has been hit", + ); + pub static ref HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL: Result = try_create_int_counter( + "http_api_beacon_proposer_cache_misses_total", + "Count of times the proposer cache has been missed", + ); +} diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs new file mode 100644 index 00000000000..11800648f25 --- /dev/null +++ b/beacon_node/http_api/src/state_id.rs @@ -0,0 +1,118 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::StateId as CoreStateId; +use std::str::FromStr; +use types::{BeaconState, EthSpec, Fork, Hash256, Slot}; + +/// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading +/// states or parts of states from the database. +pub struct StateId(CoreStateId); + +impl StateId { + pub fn head() -> Self { + Self(CoreStateId::Head) + } + + pub fn slot(slot: Slot) -> Self { + Self(CoreStateId::Slot(slot)) + } + + /// Return the state root identified by `self`. 
+ pub fn root( + &self, + chain: &BeaconChain, + ) -> Result { + let slot = match &self.0 { + CoreStateId::Head => { + return chain + .head_info() + .map(|head| head.state_root) + .map_err(warp_utils::reject::beacon_chain_error) + } + CoreStateId::Genesis => return Ok(chain.genesis_state_root), + CoreStateId::Finalized => chain.head_info().map(|head| { + head.finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + }), + CoreStateId::Justified => chain.head_info().map(|head| { + head.current_justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + }), + CoreStateId::Slot(slot) => Ok(*slot), + CoreStateId::Root(root) => return Ok(*root), + } + .map_err(warp_utils::reject::beacon_chain_error)?; + + chain + .state_root_at_slot(slot) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) + }) + } + + /// Return the `fork` field of the state identified by `self`. + pub fn fork( + &self, + chain: &BeaconChain, + ) -> Result { + self.map_state(chain, |state| Ok(state.fork)) + } + + /// Return the `BeaconState` identified by `self`. + pub fn state( + &self, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + let (state_root, slot_opt) = match &self.0 { + CoreStateId::Head => { + return chain + .head_beacon_state() + .map_err(warp_utils::reject::beacon_chain_error) + } + CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), + _ => (self.root(chain)?, None), + }; + + chain + .get_state(&state_root, slot_opt) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|opt| { + opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon state at root {}", + state_root + )) + }) + }) + } + + /// Map a function across the `BeaconState` identified by `self`. + /// + /// This function will avoid instantiating/copying a new state when `self` points to the head + /// of the chain. 
+ pub fn map_state( + &self, + chain: &BeaconChain, + func: F, + ) -> Result + where + F: Fn(&BeaconState) -> Result, + { + match &self.0 { + CoreStateId::Head => chain + .with_head(|snapshot| Ok(func(&snapshot.beacon_state))) + .map_err(warp_utils::reject::beacon_chain_error)?, + _ => func(&self.state(chain)?), + } + } +} + +impl FromStr for StateId { + type Err = String; + + fn from_str(s: &str) -> Result { + CoreStateId::from_str(s).map(Self) + } +} diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs new file mode 100644 index 00000000000..ab787a98bb9 --- /dev/null +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -0,0 +1,90 @@ +use crate::state_id::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::{ + lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData}, + types::ValidatorId, +}; +use state_processing::per_epoch_processing::ValidatorStatuses; +use types::{Epoch, EthSpec}; + +/// Returns information about *all validators* (i.e., global) and how they performed during a given +/// epoch. +pub fn global_validator_inclusion_data( + epoch: Epoch, + chain: &BeaconChain, +) -> Result { + // This is the last slot of the given epoch (one prior to the first slot of the next epoch). 
+ let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; + + let state = StateId::slot(target_slot).state(chain)?; + + let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + validator_statuses + .process_attestations(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + + let totals = validator_statuses.total_balances; + + Ok(GlobalValidatorInclusionData { + current_epoch_active_gwei: totals.current_epoch(), + previous_epoch_active_gwei: totals.previous_epoch(), + current_epoch_attesting_gwei: totals.current_epoch_attesters(), + current_epoch_target_attesting_gwei: totals.current_epoch_target_attesters(), + previous_epoch_attesting_gwei: totals.previous_epoch_attesters(), + previous_epoch_target_attesting_gwei: totals.previous_epoch_target_attesters(), + previous_epoch_head_attesting_gwei: totals.previous_epoch_head_attesters(), + }) +} + +/// Returns information about a single validator and how it performed during a given epoch. +pub fn validator_inclusion_data( + epoch: Epoch, + validator_id: &ValidatorId, + chain: &BeaconChain, +) -> Result, warp::Rejection> { + // This is the last slot of the given epoch (one prior to the first slot of the next epoch). 
+ let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; + + let mut state = StateId::slot(target_slot).state(chain)?; + + let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + validator_statuses + .process_attestations(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + + state + .update_pubkey_cache() + .map_err(warp_utils::reject::beacon_state_error)?; + + let validator_index = match validator_id { + ValidatorId::Index(index) => *index as usize, + ValidatorId::PublicKey(pubkey) => { + if let Some(index) = state + .get_validator_index(pubkey) + .map_err(warp_utils::reject::beacon_state_error)? + { + index + } else { + return Ok(None); + } + } + }; + + Ok(validator_statuses + .statuses + .get(validator_index) + .map(|vote| ValidatorInclusionData { + is_slashed: vote.is_slashed, + is_withdrawable_in_current_epoch: vote.is_withdrawable_in_current_epoch, + is_active_in_current_epoch: vote.is_active_in_current_epoch, + is_active_in_previous_epoch: vote.is_active_in_previous_epoch, + current_epoch_effective_balance_gwei: vote.current_epoch_effective_balance, + is_current_epoch_attester: vote.is_current_epoch_attester, + is_current_epoch_target_attester: vote.is_current_epoch_target_attester, + is_previous_epoch_attester: vote.is_previous_epoch_attester, + is_previous_epoch_target_attester: vote.is_previous_epoch_target_attester, + is_previous_epoch_head_attester: vote.is_previous_epoch_head_attester, + })) +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs new file mode 100644 index 00000000000..162cdcf149c --- /dev/null +++ b/beacon_node/http_api/tests/tests.rs @@ -0,0 +1,1780 @@ +use beacon_chain::{ + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, + BlockingMigratorEphemeralHarnessType, + }, + BeaconChain, StateSkipConfig, +}; +use discv5::enr::{CombinedKey, EnrBuilder}; +use 
environment::null_logger; +use eth2::{types::*, BeaconNodeHttpClient, Url}; +use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, NetworkGlobals}; +use http_api::{Config, Context}; +use network::NetworkMessage; +use state_processing::per_slot_processing; +use std::convert::TryInto; +use std::net::Ipv4Addr; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tree_hash::TreeHash; +use types::{ + test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain, + EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, +}; + +type E = MainnetEthSpec; + +const SLOTS_PER_EPOCH: u64 = 32; +const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize; +const CHAIN_LENGTH: u64 = SLOTS_PER_EPOCH * 5; +const JUSTIFIED_EPOCH: u64 = 4; +const FINALIZED_EPOCH: u64 = 3; + +/// Skipping the slots around the epoch boundary allows us to check that we're obtaining states +/// from skipped slots for the finalized and justified checkpoints (instead of the state from the +/// block that those roots point to). 
+const SKIPPED_SLOTS: &[u64] = &[ + JUSTIFIED_EPOCH * SLOTS_PER_EPOCH - 1, + JUSTIFIED_EPOCH * SLOTS_PER_EPOCH, + FINALIZED_EPOCH * SLOTS_PER_EPOCH - 1, + FINALIZED_EPOCH * SLOTS_PER_EPOCH, +]; + +struct ApiTester { + chain: Arc>>, + client: BeaconNodeHttpClient, + next_block: SignedBeaconBlock, + attestations: Vec>, + attester_slashing: AttesterSlashing, + proposer_slashing: ProposerSlashing, + voluntary_exit: SignedVoluntaryExit, + _server_shutdown: oneshot::Sender<()>, + validator_keypairs: Vec, + network_rx: mpsc::UnboundedReceiver>, +} + +impl ApiTester { + pub fn new() -> Self { + let mut harness = BeaconChainHarness::new( + MainnetEthSpec, + generate_deterministic_keypairs(VALIDATOR_COUNT), + ); + + harness.advance_slot(); + + for _ in 0..CHAIN_LENGTH { + let slot = harness.chain.slot().unwrap().as_u64(); + + if !SKIPPED_SLOTS.contains(&slot) { + harness.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + } + + harness.advance_slot(); + } + + let head = harness.chain.head().unwrap(); + + assert_eq!( + harness.chain.slot().unwrap(), + head.beacon_block.slot() + 1, + "precondition: current slot is one after head" + ); + + let (next_block, _next_state) = + harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + + let attestations = harness + .get_unaggregated_attestations( + &AttestationStrategy::AllValidators, + &head.beacon_state, + head.beacon_block_root, + harness.chain.slot().unwrap(), + ) + .into_iter() + .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) + .flatten() + .collect::>(); + + assert!( + !attestations.is_empty(), + "precondition: attestations for testing" + ); + + let attester_slashing = harness.make_attester_slashing(vec![0, 1]); + let proposer_slashing = harness.make_proposer_slashing(2); + let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + + // Changing this *after* the chain has been initialized is a bit cheeky, 
but it shouldn't + // cause issue. + // + // This allows for testing voluntary exits without building out a massive chain. + harness.chain.spec.shard_committee_period = 2; + + let chain = Arc::new(harness.chain); + + assert_eq!( + chain.head_info().unwrap().finalized_checkpoint.epoch, + 3, + "precondition: finality" + ); + assert_eq!( + chain + .head_info() + .unwrap() + .current_justified_checkpoint + .epoch, + 4, + "precondition: justification" + ); + + let (network_tx, network_rx) = mpsc::unbounded_channel(); + + let log = null_logger().unwrap(); + + // Default metadata + let meta_data = MetaData { + seq_number: 0, + attnets: EnrBitfield::::default(), + }; + let enr_key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let network_globals = NetworkGlobals::new(enr, 42, 42, meta_data, vec![], &log); + + let context = Arc::new(Context { + config: Config { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + }, + chain: Some(chain.clone()), + network_tx: Some(network_tx), + network_globals: Some(Arc::new(network_globals)), + log, + }); + let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. 
+ let _ = shutdown_rx.await; + }; + let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + + tokio::spawn(async { server.await }); + + let client = BeaconNodeHttpClient::new( + Url::parse(&format!( + "http://{}:{}", + listening_socket.ip(), + listening_socket.port() + )) + .unwrap(), + ); + + Self { + chain, + client, + next_block, + attestations, + attester_slashing, + proposer_slashing, + voluntary_exit, + _server_shutdown: shutdown_tx, + validator_keypairs: harness.validators_keypairs, + network_rx, + } + } + + fn skip_slots(self, count: u64) -> Self { + for _ in 0..count { + self.chain + .slot_clock + .set_slot(self.chain.slot().unwrap().as_u64() + 1); + } + + self + } + + fn interesting_state_ids(&self) -> Vec { + let mut ids = vec![ + StateId::Head, + StateId::Genesis, + StateId::Finalized, + StateId::Justified, + StateId::Slot(Slot::new(0)), + StateId::Slot(Slot::new(32)), + StateId::Slot(Slot::from(SKIPPED_SLOTS[0])), + StateId::Slot(Slot::from(SKIPPED_SLOTS[1])), + StateId::Slot(Slot::from(SKIPPED_SLOTS[2])), + StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), + StateId::Root(Hash256::zero()), + ]; + ids.push(StateId::Root(self.chain.head_info().unwrap().state_root)); + ids + } + + fn interesting_block_ids(&self) -> Vec { + let mut ids = vec![ + BlockId::Head, + BlockId::Genesis, + BlockId::Finalized, + BlockId::Justified, + BlockId::Slot(Slot::new(0)), + BlockId::Slot(Slot::new(32)), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[0])), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[1])), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[2])), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), + BlockId::Root(Hash256::zero()), + ]; + ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root)); + ids + } + + fn get_state(&self, state_id: StateId) -> Option> { + match state_id { + StateId::Head => Some(self.chain.head().unwrap().beacon_state), + StateId::Genesis => self + .chain + .get_state(&self.chain.genesis_state_root, None) + .unwrap(), + 
StateId::Finalized => { + let finalized_slot = self + .chain + .head_info() + .unwrap() + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let root = self + .chain + .state_root_at_slot(finalized_slot) + .unwrap() + .unwrap(); + + self.chain.get_state(&root, Some(finalized_slot)).unwrap() + } + StateId::Justified => { + let justified_slot = self + .chain + .head_info() + .unwrap() + .current_justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let root = self + .chain + .state_root_at_slot(justified_slot) + .unwrap() + .unwrap(); + + self.chain.get_state(&root, Some(justified_slot)).unwrap() + } + StateId::Slot(slot) => { + let root = self.chain.state_root_at_slot(slot).unwrap().unwrap(); + + self.chain.get_state(&root, Some(slot)).unwrap() + } + StateId::Root(root) => self.chain.get_state(&root, None).unwrap(), + } + } + + pub async fn test_beacon_genesis(self) -> Self { + let result = self.client.get_beacon_genesis().await.unwrap().data; + + let state = self.chain.head().unwrap().beacon_state; + let expected = GenesisData { + genesis_time: state.genesis_time, + genesis_validators_root: state.genesis_validators_root, + genesis_fork_version: self.chain.spec.genesis_fork_version, + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_beacon_states_root(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_root(state_id) + .await + .unwrap() + .map(|res| res.data.root); + + let expected = match state_id { + StateId::Head => Some(self.chain.head_info().unwrap().state_root), + StateId::Genesis => Some(self.chain.genesis_state_root), + StateId::Finalized => { + let finalized_slot = self + .chain + .head_info() + .unwrap() + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + self.chain.state_root_at_slot(finalized_slot).unwrap() + } + StateId::Justified => { + let justified_slot = self + .chain + .head_info() + .unwrap() + 
.current_justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + self.chain.state_root_at_slot(justified_slot).unwrap() + } + StateId::Slot(slot) => self.chain.state_root_at_slot(slot).unwrap(), + StateId::Root(root) => Some(root), + }; + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_fork(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_state(state_id).map(|state| state.fork); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self + .get_state(state_id) + .map(|state| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint, + current_justified: state.current_justified_checkpoint, + finalized: state.finalized_checkpoint, + }); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_validators(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_validators(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_state(state_id).map(|state| { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = self.chain.spec.far_future_epoch; + + let mut validators = Vec::with_capacity(state.validators.len()); + + for i in 0..state.validators.len() { + let validator = state.validators[i].clone(); + + validators.push(ValidatorData { + index: i as u64, + balance: state.balances[i], + status: ValidatorStatus::from_validator( + Some(&validator), + epoch, 
+ finalized_epoch, + far_future_epoch, + ), + validator, + }) + } + + validators + }); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_validator_id(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_opt = self.get_state(state_id); + let validators = match state_opt.as_ref() { + Some(state) => state.validators.clone().into(), + None => vec![], + }; + + for (i, validator) in validators.into_iter().enumerate() { + let validator_ids = &[ + ValidatorId::PublicKey(validator.pubkey.clone()), + ValidatorId::Index(i as u64), + ]; + + for validator_id in validator_ids { + let result = self + .client + .get_beacon_states_validator_id(state_id, validator_id) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_ref().expect("result should be none"); + + let expected = { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = self.chain.spec.far_future_epoch; + + ValidatorData { + index: i as u64, + balance: state.balances[i], + status: ValidatorStatus::from_validator( + Some(&validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + } + }; + + assert_eq!(result, Some(expected), "{:?}, {:?}", state_id, validator_id); + } + } + } + + self + } + + pub async fn test_beacon_states_committees(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = self.get_state(state_id); + + let epoch = state_opt + .as_ref() + .map(|state| state.current_epoch()) + .unwrap_or_else(|| Epoch::new(0)); + + let results = self + .client + .get_beacon_states_committees(state_id, epoch, None, None) + .await + .unwrap() + .map(|res| res.data); + + if results.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + 
state.build_all_committee_caches(&self.chain.spec).unwrap(); + let committees = state + .get_beacon_committees_at_epoch( + RelativeEpoch::from_epoch(state.current_epoch(), epoch).unwrap(), + ) + .unwrap(); + + for (i, result) in results.unwrap().into_iter().enumerate() { + let expected = &committees[i]; + + assert_eq!(result.index, expected.index, "{}", state_id); + assert_eq!(result.slot, expected.slot, "{}", state_id); + assert_eq!( + result + .validators + .into_iter() + .map(|i| i as usize) + .collect::>(), + expected.committee.to_vec(), + "{}", + state_id + ); + } + } + + self + } + + fn get_block_root(&self, block_id: BlockId) -> Option { + match block_id { + BlockId::Head => Some(self.chain.head_info().unwrap().block_root), + BlockId::Genesis => Some(self.chain.genesis_block_root), + BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root), + BlockId::Justified => Some( + self.chain + .head_info() + .unwrap() + .current_justified_checkpoint + .root, + ), + BlockId::Slot(slot) => self.chain.block_root_at_slot(slot).unwrap(), + BlockId::Root(root) => Some(root), + } + } + + fn get_block(&self, block_id: BlockId) -> Option> { + let root = self.get_block_root(block_id); + root.and_then(|root| self.chain.get_block(&root).unwrap()) + } + + pub async fn test_beacon_headers_all_slots(self) -> Self { + for slot in 0..CHAIN_LENGTH { + let slot = Slot::from(slot); + + let result = self + .client + .get_beacon_headers(Some(slot), None) + .await + .unwrap() + .map(|res| res.data); + + let root = self.chain.block_root_at_slot(slot).unwrap(); + + if root.is_none() && result.is_none() { + continue; + } + + let root = root.unwrap(); + let block = self.chain.block_at_slot(slot).unwrap().unwrap(); + let header = BlockHeaderData { + root, + canonical: true, + header: BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + let expected = vec![header]; + + assert_eq!(result.unwrap(), 
expected, "slot {:?}", slot); + } + + self + } + + pub async fn test_beacon_headers_all_parents(self) -> Self { + let mut roots = self + .chain + .rev_iter_block_roots() + .unwrap() + .map(Result::unwrap) + .map(|(root, _slot)| root) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // The iterator natively returns duplicate roots for skipped slots. + roots.dedup(); + + for i in 1..roots.len() { + let parent_root = roots[i - 1]; + let child_root = roots[i]; + + let result = self + .client + .get_beacon_headers(None, Some(parent_root)) + .await + .unwrap() + .unwrap() + .data; + + assert_eq!(result.len(), 1, "i {}", i); + assert_eq!(result[0].root, child_root, "i {}", i); + } + + self + } + + pub async fn test_beacon_headers_block_id(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_headers_block_id(block_id) + .await + .unwrap() + .map(|res| res.data); + + let block_root_opt = self.get_block_root(block_id); + + let block_opt = block_root_opt.and_then(|root| self.chain.get_block(&root).unwrap()); + + if block_opt.is_none() && result.is_none() { + continue; + } + + let result = result.unwrap(); + let block = block_opt.unwrap(); + let block_root = block_root_opt.unwrap(); + let canonical = self + .chain + .block_root_at_slot(block.slot()) + .unwrap() + .map_or(false, |canonical| block_root == canonical); + + assert_eq!(result.canonical, canonical, "{:?}", block_id); + assert_eq!(result.root, block_root, "{:?}", block_id); + assert_eq!( + result.header.message, + block.message.block_header(), + "{:?}", + block_id + ); + assert_eq!( + result.header.signature, + block.signature.into(), + "{:?}", + block_id + ); + } + + self + } + + pub async fn test_beacon_blocks_root(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks_root(block_id) + .await + .unwrap() + .map(|res| res.data.root); + + let expected = self.get_block_root(block_id); + + 
assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_post_beacon_blocks_valid(mut self) -> Self { + let next_block = &self.next_block; + + self.client.post_beacon_blocks(next_block).await.unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid blocks should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { + let mut next_block = self.next_block.clone(); + next_block.message.proposer_index += 1; + + assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); + + assert!( + self.network_rx.try_recv().is_ok(), + "invalid blocks should be sent to network" + ); + + self + } + + pub async fn test_beacon_blocks(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks(block_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_block(block_id); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_attestations(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks_attestations(block_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self + .get_block(block_id) + .map(|block| block.message.body.attestations.into()); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { + for attestation in &self.attestations { + self.client + .post_beacon_pool_attestations(attestation) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid attestation should be sent to network" + ); + } + + self + } + + pub async fn test_post_beacon_pool_attestations_invalid(mut self) -> Self { + for attestation in &self.attestations { + let mut attestation = attestation.clone(); + attestation.data.slot += 1; + + assert!(self + .client + 
.post_beacon_pool_attestations(&attestation) + .await + .is_err()); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid attestation should not be sent to network" + ); + } + + self + } + + pub async fn test_get_beacon_pool_attestations(self) -> Self { + let result = self + .client + .get_beacon_pool_attestations() + .await + .unwrap() + .data; + + let mut expected = self.chain.op_pool.get_all_attestations(); + expected.extend(self.chain.naive_aggregation_pool.read().iter().cloned()); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_valid(mut self) -> Self { + self.client + .post_beacon_pool_attester_slashings(&self.attester_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid attester slashing should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_invalid(mut self) -> Self { + let mut slashing = self.attester_slashing.clone(); + slashing.attestation_1.data.slot += 1; + + self.client + .post_beacon_pool_attester_slashings(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid attester slashing should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_attester_slashings(self) -> Self { + let result = self + .client + .get_beacon_pool_attester_slashings() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_attester_slashings(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_proposer_slashings_valid(mut self) -> Self { + self.client + .post_beacon_pool_proposer_slashings(&self.proposer_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid proposer slashing should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_proposer_slashings_invalid(mut self) -> Self { + let mut slashing = self.proposer_slashing.clone(); + 
slashing.signed_header_1.message.slot += 1; + + self.client + .post_beacon_pool_proposer_slashings(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid proposer slashing should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_proposer_slashings(self) -> Self { + let result = self + .client + .get_beacon_pool_proposer_slashings() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_proposer_slashings(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_voluntary_exits_valid(mut self) -> Self { + self.client + .post_beacon_pool_voluntary_exits(&self.voluntary_exit) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid exit should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_voluntary_exits_invalid(mut self) -> Self { + let mut exit = self.voluntary_exit.clone(); + exit.message.epoch += 1; + + self.client + .post_beacon_pool_voluntary_exits(&exit) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid exit should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_voluntary_exits(self) -> Self { + let result = self + .client + .get_beacon_pool_voluntary_exits() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_voluntary_exits(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_config_fork_schedule(self) -> Self { + let result = self.client.get_config_fork_schedule().await.unwrap().data; + + let expected = vec![self.chain.head_info().unwrap().fork]; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_config_spec(self) -> Self { + let result = self.client.get_config_spec().await.unwrap().data; + + let expected = YamlConfig::from_spec::(&self.chain.spec); + + assert_eq!(result, expected); + + self + } + + pub async fn 
test_get_config_deposit_contract(self) -> Self { + let result = self + .client + .get_config_deposit_contract() + .await + .unwrap() + .data; + + let expected = DepositContractData { + address: self.chain.spec.deposit_contract_address, + chain_id: eth1::DEFAULT_NETWORK_ID.into(), + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_node_version(self) -> Self { + let result = self.client.get_node_version().await.unwrap().data; + + let expected = VersionData { + version: lighthouse_version::version_with_platform(), + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_node_syncing(self) -> Self { + let result = self.client.get_node_syncing().await.unwrap().data; + let head_slot = self.chain.head_info().unwrap().slot; + let sync_distance = self.chain.slot().unwrap() - head_slot; + + let expected = SyncingData { + is_syncing: false, + head_slot, + sync_distance, + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_debug_beacon_states(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_debug_beacon_states(state_id) + .await + .unwrap() + .map(|res| res.data); + + let mut expected = self.get_state(state_id); + expected.as_mut().map(|state| state.drop_all_caches()); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_get_debug_beacon_heads(self) -> Self { + let result = self + .client + .get_debug_beacon_heads() + .await + .unwrap() + .data + .into_iter() + .map(|head| (head.root, head.slot)) + .collect::>(); + + let expected = self.chain.heads(); + + assert_eq!(result, expected); + + self + } + + fn validator_count(&self) -> usize { + self.chain.head().unwrap().beacon_state.validators.len() + } + + fn interesting_validator_indices(&self) -> Vec> { + let validator_count = self.validator_count() as u64; + + let mut interesting = vec![ + vec![], + vec![0], + vec![0, 1], + vec![0, 1, 3], + vec![validator_count], + 
vec![validator_count, 1], + vec![validator_count, 1, 3], + vec![u64::max_value()], + vec![u64::max_value(), 1], + vec![u64::max_value(), 1, 3], + ]; + + interesting.push((0..validator_count).collect()); + + interesting + } + + pub async fn test_get_validator_duties_attester(self) -> Self { + let current_epoch = self.chain.epoch().unwrap().as_u64(); + + let half = current_epoch / 2; + let first = current_epoch - half; + let last = current_epoch + half; + + for epoch in first..=last { + for indices in self.interesting_validator_indices() { + let epoch = Epoch::from(epoch); + + // The endpoint does not allow getting duties past the next epoch. + if epoch > current_epoch + 1 { + assert_eq!( + self.client + .get_validator_duties_attester(epoch, Some(&indices)) + .await + .unwrap_err() + .status() + .map(Into::into), + Some(400) + ); + continue; + } + + let results = self + .client + .get_validator_duties_attester(epoch, Some(&indices)) + .await + .unwrap() + .data; + + let mut state = self + .chain + .state_at_slot( + epoch.start_slot(E::slots_per_epoch()), + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected_len = indices + .iter() + .filter(|i| **i < state.validators.len() as u64) + .count(); + + assert_eq!(results.len(), expected_len); + + for (indices_set, &i) in indices.iter().enumerate() { + if let Some(duty) = state + .get_attestation_duties(i as usize, RelativeEpoch::Current) + .unwrap() + { + let expected = AttesterData { + pubkey: state.validators[i as usize].pubkey.clone().into(), + validator_index: i, + committees_at_slot: duty.committees_at_slot, + committee_index: duty.index, + committee_length: duty.committee_len as u64, + validator_committee_index: duty.committee_position as u64, + slot: duty.slot, + }; + + let result = results + .iter() + .find(|duty| duty.validator_index == i) + .unwrap(); + + assert_eq!( + *result, expected, + "epoch: {}, 
indices_set: {}", + epoch, indices_set + ); + } else { + assert!( + !results.iter().any(|duty| duty.validator_index == i), + "validator index should not exist in response" + ); + } + } + } + } + + self + } + + pub async fn test_get_validator_duties_proposer(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + + let result = self + .client + .get_validator_duties_proposer(current_epoch) + .await + .unwrap() + .data; + + let mut state = self.chain.head_beacon_state().unwrap(); + + while state.current_epoch() < current_epoch { + per_slot_processing(&mut state, None, &self.chain.spec).unwrap(); + } + + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected = current_epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let index = state + .get_beacon_proposer_index(slot, &self.chain.spec) + .unwrap(); + let pubkey = state.validators[index].pubkey.clone().into(); + + ProposerData { pubkey, slot } + }) + .collect::>(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_block_production(self) -> Self { + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + 
.get_validator_blocks::(slot, randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client.post_beacon_blocks(&signed_block).await.unwrap(); + + assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + pub async fn test_get_validator_attestation_data(self) -> Self { + let mut state = self.chain.head_beacon_state().unwrap(); + let slot = state.slot; + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + for index in 0..state.get_committee_count_at_slot(slot).unwrap() { + let result = self + .client + .get_validator_attestation_data(slot, index) + .await + .unwrap() + .data; + + let expected = self + .chain + .produce_unaggregated_attestation(slot, index) + .unwrap() + .data; + + assert_eq!(result, expected); + } + + self + } + + pub async fn test_get_validator_aggregate_attestation(self) -> Self { + let attestation = self + .chain + .head_beacon_block() + .unwrap() + .message + .body + .attestations[0] + .clone(); + + let result = self + .client + .get_validator_aggregate_attestation( + attestation.data.slot, + attestation.data.tree_hash_root(), + ) + .await + .unwrap() + .unwrap() + .data; + + let expected = attestation; + + assert_eq!(result, expected); + + self + } + + pub async fn get_aggregate(&mut self) -> SignedAggregateAndProof { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let mut head = self.chain.head().unwrap(); + while head.beacon_state.current_epoch() < epoch { + per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap(); + } + head.beacon_state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let committee_len = head.beacon_state.get_committee_count_at_slot(slot).unwrap(); + let fork = head.beacon_state.fork; + let genesis_validators_root = 
self.chain.genesis_validators_root; + + let mut duties = vec![]; + for i in 0..self.validator_keypairs.len() { + duties.push( + self.client + .get_validator_duties_attester(epoch, Some(&[i as u64])) + .await + .unwrap() + .data[0] + .clone(), + ) + } + + let (i, kp, duty, proof) = self + .validator_keypairs + .iter() + .enumerate() + .find_map(|(i, kp)| { + let duty = duties[i].clone(); + + let proof = SelectionProof::new::( + duty.slot, + &kp.sk, + &fork, + genesis_validators_root, + &self.chain.spec, + ); + + if proof + .is_aggregator(committee_len as usize, &self.chain.spec) + .unwrap() + { + Some((i, kp, duty, proof)) + } else { + None + } + }) + .expect("there is at least one aggregator for this epoch") + .clone(); + + if duty.slot > slot { + self.chain.slot_clock.set_slot(duty.slot.into()); + } + + let attestation_data = self + .client + .get_validator_attestation_data(duty.slot, duty.committee_index) + .await + .unwrap() + .data; + + let mut attestation = Attestation { + aggregation_bits: BitList::with_capacity(duty.committee_length as usize).unwrap(), + data: attestation_data, + signature: AggregateSignature::infinity(), + }; + + attestation + .sign( + &kp.sk, + duty.validator_committee_index as usize, + &fork, + genesis_validators_root, + &self.chain.spec, + ) + .unwrap(); + + SignedAggregateAndProof::from_aggregate( + i as u64, + attestation, + Some(proof), + &kp.sk, + &fork, + genesis_validators_root, + &self.chain.spec, + ) + } + + pub async fn test_get_validator_aggregate_and_proofs_valid(mut self) -> Self { + let aggregate = self.get_aggregate().await; + + self.client + .post_validator_aggregate_and_proof::(&aggregate) + .await + .unwrap(); + + assert!(self.network_rx.try_recv().is_ok()); + + self + } + + pub async fn test_get_validator_aggregate_and_proofs_invalid(mut self) -> Self { + let mut aggregate = self.get_aggregate().await; + + aggregate.message.aggregate.data.slot += 1; + + self.client + .post_validator_aggregate_and_proof::(&aggregate) + 
.await + .unwrap_err(); + + assert!(self.network_rx.try_recv().is_err()); + + self + } + + pub async fn test_get_validator_beacon_committee_subscriptions(mut self) -> Self { + let subscription = BeaconCommitteeSubscription { + validator_index: 0, + committee_index: 0, + committees_at_slot: 1, + slot: Slot::new(1), + is_aggregator: true, + }; + + self.client + .post_validator_beacon_committee_subscriptions(&[subscription]) + .await + .unwrap(); + + self.network_rx.try_recv().unwrap(); + + self + } + + #[cfg(target_os = "linux")] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap(); + + self + } + + #[cfg(not(target_os = "linux"))] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap_err(); + + self + } + + pub async fn test_get_lighthouse_syncing(self) -> Self { + self.client.get_lighthouse_syncing().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_proto_array(self) -> Self { + self.client.get_lighthouse_proto_array().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_validator_inclusion_global(self) -> Self { + let epoch = self.chain.epoch().unwrap() - 1; + self.client + .get_lighthouse_validator_inclusion_global(epoch) + .await + .unwrap(); + + self + } + + pub async fn test_get_lighthouse_validator_inclusion(self) -> Self { + let epoch = self.chain.epoch().unwrap() - 1; + self.client + .get_lighthouse_validator_inclusion(epoch, ValidatorId::Index(0)) + .await + .unwrap(); + + self + } +} + +#[tokio::test(core_threads = 2)] +async fn beacon_genesis() { + ApiTester::new().test_beacon_genesis().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_root() { + ApiTester::new().test_beacon_states_root().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_fork() { + ApiTester::new().test_beacon_states_fork().await; +} + +#[tokio::test(core_threads = 2)] +async fn 
beacon_states_finality_checkpoints() { + ApiTester::new() + .test_beacon_states_finality_checkpoints() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_validators() { + ApiTester::new().test_beacon_states_validators().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_committees() { + ApiTester::new().test_beacon_states_committees().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_validator_id() { + ApiTester::new().test_beacon_states_validator_id().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_headers() { + ApiTester::new() + .test_beacon_headers_all_slots() + .await + .test_beacon_headers_all_parents() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_headers_block_id() { + ApiTester::new().test_beacon_headers_block_id().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks() { + ApiTester::new().test_beacon_blocks().await; +} + +#[tokio::test(core_threads = 2)] +async fn post_beacon_blocks_valid() { + ApiTester::new().test_post_beacon_blocks_valid().await; +} + +#[tokio::test(core_threads = 2)] +async fn post_beacon_blocks_invalid() { + ApiTester::new().test_post_beacon_blocks_invalid().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks_root() { + ApiTester::new().test_beacon_blocks_root().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks_attestations() { + ApiTester::new().test_beacon_blocks_attestations().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_get() { + ApiTester::new() + .test_get_beacon_pool_attestations() + .await + .test_get_beacon_pool_attester_slashings() + .await + .test_get_beacon_pool_proposer_slashings() + .await + .test_get_beacon_pool_voluntary_exits() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attestations_valid() { + ApiTester::new() + .test_post_beacon_pool_attestations_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async 
fn beacon_pools_post_attestations_invalid() { + ApiTester::new() + .test_post_beacon_pool_attestations_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attester_slashings_valid() { + ApiTester::new() + .test_post_beacon_pool_attester_slashings_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attester_slashings_invalid() { + ApiTester::new() + .test_post_beacon_pool_attester_slashings_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_proposer_slashings_valid() { + ApiTester::new() + .test_post_beacon_pool_proposer_slashings_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_proposer_slashings_invalid() { + ApiTester::new() + .test_post_beacon_pool_proposer_slashings_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_voluntary_exits_valid() { + ApiTester::new() + .test_post_beacon_pool_voluntary_exits_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_voluntary_exits_invalid() { + ApiTester::new() + .test_post_beacon_pool_voluntary_exits_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn config_get() { + ApiTester::new() + .test_get_config_fork_schedule() + .await + .test_get_config_spec() + .await + .test_get_config_deposit_contract() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn debug_get() { + ApiTester::new() + .test_get_debug_beacon_states() + .await + .test_get_debug_beacon_heads() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn node_get() { + ApiTester::new() + .test_get_node_version() + .await + .test_get_node_syncing() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_attester() { + ApiTester::new().test_get_validator_duties_attester().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_attester_with_skip_slots() { + ApiTester::new() + 
.skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_attester() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_proposer() { + ApiTester::new().test_get_validator_duties_proposer().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_proposer_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_proposer() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn block_production() { + ApiTester::new().test_block_production().await; +} + +#[tokio::test(core_threads = 2)] +async fn block_production_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_block_production() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_attestation_data() { + ApiTester::new().test_get_validator_attestation_data().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_attestation_data_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_attestation_data() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_attestation() { + ApiTester::new() + .test_get_validator_aggregate_attestation() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_attestation_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_attestation() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid() { + ApiTester::new() + .test_get_validator_aggregate_and_proofs_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_and_proofs_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_invalid() { + 
ApiTester::new() + .test_get_validator_aggregate_and_proofs_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_and_proofs_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_beacon_committee_subscriptions() { + ApiTester::new() + .test_get_validator_beacon_committee_subscriptions() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn lighthouse_endpoints() { + ApiTester::new() + .test_get_lighthouse_health() + .await + .test_get_lighthouse_syncing() + .await + .test_get_lighthouse_proto_array() + .await + .test_get_lighthouse_validator_inclusion() + .await + .test_get_lighthouse_validator_inclusion_global() + .await; +} diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml new file mode 100644 index 00000000000..482f7a5debc --- /dev/null +++ b/beacon_node/http_metrics/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "http_metrics" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +prometheus = "0.9.0" +warp = "0.2.5" +serde = { version = "1.0.110", features = ["derive"] } +slog = "2.5.2" +beacon_chain = { path = "../beacon_chain" } +store = { path = "../store" } +eth2_libp2p = { path = "../eth2_libp2p" } +slot_clock = { path = "../../common/slot_clock" } +lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +lazy_static = "1.4.0" +eth2 = { path = "../../common/eth2" } +lighthouse_version = { path = "../../common/lighthouse_version" } +warp_utils = { path = "../../common/warp_utils" } + +[dev-dependencies] +tokio = { version = "0.2.21", features = ["sync"] } +reqwest = { version = "0.10.8", features = ["json"] } +environment = { path = "../../lighthouse/environment" } +types = { path = 
"../../consensus/types" } diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs new file mode 100644 index 00000000000..37eac82bda4 --- /dev/null +++ b/beacon_node/http_metrics/src/lib.rs @@ -0,0 +1,135 @@ +//! This crate provides a HTTP server that is solely dedicated to serving the `/metrics` endpoint. +//! +//! For other endpoints, see the `http_api` crate. + +#[macro_use] +extern crate lazy_static; + +mod metrics; + +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_version::version_with_platform; +use serde::{Deserialize, Serialize}; +use slog::{crit, info, Logger}; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::path::PathBuf; +use std::sync::Arc; +use warp::{http::Response, Filter}; + +#[derive(Debug)] +pub enum Error { + Warp(warp::Error), + Other(String), +} + +impl From for Error { + fn from(e: warp::Error) -> Self { + Error::Warp(e) + } +} + +impl From for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. +pub struct Context { + pub config: Config, + pub chain: Option>>, + pub db_path: Option, + pub freezer_db_path: Option, + pub log: Logger, +} + +/// Configuration for the HTTP server. +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + pub listen_addr: Ipv4Addr, + pub listen_port: u16, + pub allow_origin: Option, +} + +impl Default for Config { + fn default() -> Self { + Self { + enabled: false, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 5054, + allow_origin: None, + } + } +} + +/// Creates a server that will serve requests using information from `ctx`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. 
+/// +/// ## Returns +/// +/// This function will bind the server to the provided address and then return a tuple of: +/// +/// - `SocketAddr`: the address that the HTTP server will listen on. +/// - `Future`: the actual server future that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. +pub fn serve( + ctx: Arc>, + shutdown: impl Future + Send + Sync + 'static, +) -> Result<(SocketAddr, impl Future), Error> { + let config = &ctx.config; + let log = ctx.log.clone(); + let allow_origin = config.allow_origin.clone(); + + // Sanity check. + if !config.enabled { + crit!(log, "Cannot start disabled metrics HTTP server"); + return Err(Error::Other( + "A disabled metrics server should not be started".to_string(), + )); + } + + let inner_ctx = ctx.clone(); + let routes = warp::get() + .and(warp::path("metrics")) + .map(move || inner_ctx.clone()) + .and_then(|ctx: Arc>| async move { + Ok::<_, warp::Rejection>( + metrics::gather_prometheus_metrics(&ctx) + .map(|body| Response::builder().status(200).body(body).unwrap()) + .unwrap_or_else(|e| { + Response::builder() + .status(500) + .body(format!("Unable to gather metrics: {:?}", e)) + .unwrap() + }), + ) + }) + // Add a `Server` header. + .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) + // Maybe add some CORS headers. 
+ .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref())); + + let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )?; + + info!( + log, + "Metrics HTTP server started"; + "listen_address" => listening_socket.to_string(), + ); + + Ok((listening_socket, server)) +} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs similarity index 69% rename from beacon_node/rest_api/src/metrics.rs rename to beacon_node/http_metrics/src/metrics.rs index 4b1ba737d7a..bcd803c405e 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,38 +1,11 @@ -use crate::{ApiError, Context}; +use crate::Context; use beacon_chain::BeaconChainTypes; +use eth2::lighthouse::Health; use lighthouse_metrics::{Encoder, TextEncoder}; -use rest_types::Health; -use std::sync::Arc; pub use lighthouse_metrics::*; lazy_static! 
{ - pub static ref BEACON_HTTP_API_REQUESTS_TOTAL: Result = - try_create_int_counter_vec( - "beacon_http_api_requests_total", - "Count of HTTP requests received", - &["endpoint"] - ); - pub static ref BEACON_HTTP_API_SUCCESS_TOTAL: Result = - try_create_int_counter_vec( - "beacon_http_api_success_total", - "Count of HTTP requests that returned 200 OK", - &["endpoint"] - ); - pub static ref BEACON_HTTP_API_ERROR_TOTAL: Result = try_create_int_counter_vec( - "beacon_http_api_error_total", - "Count of HTTP that did not return 200 OK", - &["endpoint"] - ); - pub static ref BEACON_HTTP_API_TIMES_TOTAL: Result = try_create_histogram_vec( - "beacon_http_api_times_total", - "Duration to process HTTP requests", - &["endpoint"] - ); - pub static ref REQUEST_RESPONSE_TIME: Result = try_create_histogram( - "http_server_request_duration_seconds", - "Time taken to build a response to a HTTP request" - ); pub static ref PROCESS_NUM_THREADS: Result = try_create_int_gauge( "process_num_threads", "Number of threads used by the current process" @@ -67,14 +40,9 @@ lazy_static! { try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes"); } -/// Returns the full set of Prometheus metrics for the Beacon Node application. -/// -/// # Note -/// -/// This is a HTTP handler method. -pub fn get_prometheus( - ctx: Arc>, -) -> std::result::Result { +pub fn gather_prometheus_metrics( + ctx: &Context, +) -> std::result::Result { let mut buffer = vec![]; let encoder = TextEncoder::new(); @@ -94,9 +62,17 @@ pub fn get_prometheus( // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. 
- slot_clock::scrape_for_metrics::(&ctx.beacon_chain.slot_clock); - store::scrape_for_metrics(&ctx.db_path, &ctx.freezer_db_path); - beacon_chain::scrape_for_metrics(&ctx.beacon_chain); + if let Some(beacon_chain) = ctx.chain.as_ref() { + slot_clock::scrape_for_metrics::(&beacon_chain.slot_clock); + beacon_chain::scrape_for_metrics(beacon_chain); + } + + if let (Some(db_path), Some(freezer_db_path)) = + (ctx.db_path.as_ref(), ctx.freezer_db_path.as_ref()) + { + store::scrape_for_metrics(db_path, freezer_db_path); + } + eth2_libp2p::scrape_discovery_metrics(); // This will silently fail if we are unable to observe the health. This is desired behaviour @@ -125,6 +101,5 @@ pub fn get_prometheus( .encode(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); - String::from_utf8(buffer) - .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) + String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs new file mode 100644 index 00000000000..18a40d4f849 --- /dev/null +++ b/beacon_node/http_metrics/tests/tests.rs @@ -0,0 +1,46 @@ +use beacon_chain::test_utils::BlockingMigratorEphemeralHarnessType; +use environment::null_logger; +use http_metrics::Config; +use reqwest::StatusCode; +use std::net::Ipv4Addr; +use std::sync::Arc; +use tokio::sync::oneshot; +use types::MainnetEthSpec; + +type Context = http_metrics::Context>; + +#[tokio::test(core_threads = 2)] +async fn returns_200_ok() { + let log = null_logger().unwrap(); + + let context = Arc::new(Context { + config: Config { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + }, + chain: None, + db_path: None, + freezer_db_path: None, + log, + }); + + let ctx = context.clone(); + let (_shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); + let server_shutdown = async { + // It's not really interesting why 
this triggered, just that it happened. + let _ = shutdown_rx.await; + }; + let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap(); + + tokio::spawn(async { server.await }); + + let url = format!( + "http://{}:{}/metrics", + listening_socket.ip(), + listening_socket.port() + ); + + assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); +} diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 2ef369e3b45..7e75f42a752 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -17,7 +17,6 @@ beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } eth2_libp2p = { path = "../eth2_libp2p" } hashset_delay = { path = "../../common/hashset_delay" } -rest_types = { path = "../../common/rest_types" } types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slot_clock = { path = "../../common/slot_clock" } diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index 59f63890a29..7c017d295b9 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -15,9 +15,8 @@ use slog::{debug, error, o, trace, warn}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::SubnetDiscovery; use hashset_delay::HashSetDelay; -use rest_types::ValidatorSubscription; use slot_clock::SlotClock; -use types::{Attestation, EthSpec, Slot, SubnetId}; +use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; use crate::metrics; diff --git a/beacon_node/network/src/beacon_processor/worker.rs b/beacon_node/network/src/beacon_processor/worker.rs index 6388962e595..442f72f36f5 100644 --- a/beacon_node/network/src/beacon_processor/worker.rs +++ b/beacon_node/network/src/beacon_processor/worker.rs @@ -45,7 +45,7 @@ impl Worker { let attestation = match self .chain - 
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) { Ok(attestation) => attestation, Err(e) => { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 84f807007fa..26fa571308c 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -15,13 +15,12 @@ use eth2_libp2p::{ }; use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; use futures::prelude::*; -use rest_types::ValidatorSubscription; use slog::{debug, error, info, o, trace, warn}; use std::{collections::HashMap, sync::Arc, time::Duration}; use store::HotColdDB; use tokio::sync::mpsc; use tokio::time::Delay; -use types::EthSpec; +use types::{EthSpec, ValidatorSubscription}; mod tests; diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 5b664c87726..6d6a8d1cdc1 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -332,6 +332,51 @@ impl OperationPool { pub fn num_voluntary_exits(&self) -> usize { self.voluntary_exits.read().len() } + + /// Returns all known `Attestation` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_attestations(&self) -> Vec> { + self.attestations + .read() + .iter() + .map(|(_, attns)| attns.iter().cloned()) + .flatten() + .collect() + } + + /// Returns all known `AttesterSlashing` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_attester_slashings(&self) -> Vec> { + self.attester_slashings + .read() + .iter() + .map(|(slashing, _)| slashing.clone()) + .collect() + } + + /// Returns all known `ProposerSlashing` objects. + /// + /// This method may return objects that are invalid for block inclusion. 
+ pub fn get_all_proposer_slashings(&self) -> Vec { + self.proposer_slashings + .read() + .iter() + .map(|(_, slashing)| slashing.clone()) + .collect() + } + + /// Returns all known `SignedVoluntaryExit` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_voluntary_exits(&self) -> Vec { + self.voluntary_exits + .read() + .iter() + .map(|(_, exit)| exit.clone()) + .collect() + } } /// Filter up to a maximum number of operations out of an iterator. diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs deleted file mode 100644 index 0ac95f7ca9b..00000000000 --- a/beacon_node/rest_api/src/beacon.rs +++ /dev/null @@ -1,500 +0,0 @@ -use crate::helpers::*; -use crate::validator::get_state_for_epoch; -use crate::Context; -use crate::{ApiError, UrlQuery}; -use beacon_chain::{ - observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig, -}; -use futures::executor::block_on; -use hyper::body::Bytes; -use hyper::{Body, Request}; -use rest_types::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; -use std::io::Write; -use std::sync::Arc; - -use slog::error; -use types::{ - AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes, - RelativeEpoch, SignedBeaconBlockHash, Slot, -}; - -/// Returns a summary of the head of the beacon chain. 
-pub fn get_head( - ctx: Arc>, -) -> Result { - let beacon_chain = &ctx.beacon_chain; - let chain_head = beacon_chain.head()?; - - Ok(CanonicalHeadResponse { - slot: chain_head.beacon_state.slot, - block_root: chain_head.beacon_block_root, - state_root: chain_head.beacon_state_root, - finalized_slot: chain_head - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root, - justified_slot: chain_head - .beacon_state - .current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root, - previous_justified_slot: chain_head - .beacon_state - .previous_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, - }) -} - -/// Return the list of heads of the beacon chain. -pub fn get_heads(ctx: Arc>) -> Vec { - ctx.beacon_chain - .heads() - .into_iter() - .map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock { - beacon_block_root, - beacon_block_slot, - }) - .collect() -} - -/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. -pub fn get_block( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let beacon_chain = &ctx.beacon_chain; - let query_params = ["root", "slot"]; - let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; - - let block_root = match (key.as_ref(), value) { - ("slot", value) => { - let target = parse_slot(&value)?; - - block_root_at_slot(beacon_chain, target)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for slot {:?}", - target - )) - })? 
- } - ("root", value) => parse_root(&value)?, - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), - }; - - let block = beacon_chain.store.get_block(&block_root)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for root {:?}", - block_root - )) - })?; - - Ok(BlockResponse { - root: block_root, - beacon_block: block, - }) -} - -/// HTTP handler to return a `SignedBeaconBlock` root at a given `slot`. -pub fn get_block_root( - req: Request>, - ctx: Arc>, -) -> Result { - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let target = parse_slot(&slot_string)?; - - block_root_at_slot(&ctx.beacon_chain, target)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for slot {:?}", - target - )) - }) -} - -fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result { - let mut buffer = Vec::new(); - { - let mut sse_message = uhttp_sse::SseMessage::new(&mut buffer); - let untyped_hash: Hash256 = new_head_hash.into(); - write!(sse_message.data()?, "{:?}", untyped_hash)?; - } - let bytes: Bytes = buffer.into(); - Ok(bytes) -} - -pub fn stream_forks(ctx: Arc>) -> Result { - let mut events = ctx.events.lock().add_rx(); - let (mut sender, body) = Body::channel(); - std::thread::spawn(move || { - while let Ok(new_head_hash) = events.recv() { - let chunk = match make_sse_response_chunk(new_head_hash) { - Ok(chunk) => chunk, - Err(e) => { - error!(ctx.log, "Failed to make SSE chunk"; "error" => e.to_string()); - sender.abort(); - break; - } - }; - match block_on(sender.send_data(chunk)) { - Err(e) if e.is_closed() => break, - Err(e) => error!(ctx.log, "Couldn't stream piece {:?}", e), - Ok(_) => (), - } - } - }); - Ok(body) -} - -/// HTTP handler to which accepts a query string of a list of validator pubkeys and maps it to a -/// `ValidatorResponse`. -/// -/// This method is limited to as many `pubkeys` that can fit in a URL. 
See `post_validators` for -/// doing bulk requests. -pub fn get_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let validator_pubkeys = query - .all_of("validator_pubkeys")? - .iter() - .map(|validator_pubkey_str| parse_pubkey_bytes(validator_pubkey_str)) - .collect::, _>>()?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - validator_responses_by_pubkey(&ctx.beacon_chain, state_root_opt, validator_pubkeys) -} - -/// HTTP handler to return all validators, each as a `ValidatorResponse`. -pub fn get_all_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - state.update_pubkey_cache()?; - - state - .validators - .iter() - .map(|validator| validator_response_by_pubkey(&state, validator.pubkey.clone())) - .collect::, _>>() -} - -/// HTTP handler to return all active validators, each as a `ValidatorResponse`. -pub fn get_active_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - state.update_pubkey_cache()?; - - state - .validators - .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())) - .map(|validator| validator_response_by_pubkey(&state, validator.pubkey.clone())) - .collect::, _>>() -} - -/// HTTP handler to which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for -/// each of the given `pubkeys`. 
When `state_root` is `None`, the canonical head is used. -/// -/// This method allows for a basically unbounded list of `pubkeys`, where as the `get_validators` -/// request is limited by the max number of pubkeys you can fit in a URL. -pub fn post_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - serde_json::from_slice::(&req.into_body()) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorRequest: {:?}", - e - )) - }) - .and_then(|bulk_request| { - validator_responses_by_pubkey( - &ctx.beacon_chain, - bulk_request.state_root, - bulk_request.pubkeys, - ) - }) -} - -/// Returns either the state given by `state_root_opt`, or the canonical head state if it is -/// `None`. -fn get_state_from_root_opt( - beacon_chain: &BeaconChain, - state_root_opt: Option, -) -> Result, ApiError> { - if let Some(state_root) = state_root_opt { - beacon_chain - .get_state(&state_root, None) - .map_err(|e| { - ApiError::ServerError(format!( - "Database error when reading state root {}: {:?}", - state_root, e - )) - })? - .ok_or_else(|| ApiError::NotFound(format!("No state exists with root: {}", state_root))) - } else { - Ok(beacon_chain.head()?.beacon_state) - } -} - -/// Maps a vec of `validator_pubkey` to a vec of `ValidatorResponse`, using the state at the given -/// `state_root`. If `state_root.is_none()`, uses the canonial head state. -fn validator_responses_by_pubkey( - beacon_chain: &BeaconChain, - state_root_opt: Option, - validator_pubkeys: Vec, -) -> Result, ApiError> { - let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?; - state.update_pubkey_cache()?; - - validator_pubkeys - .into_iter() - .map(|validator_pubkey| validator_response_by_pubkey(&state, validator_pubkey)) - .collect::, ApiError>>() -} - -/// Maps a `validator_pubkey` to a `ValidatorResponse`, using the given state. -/// -/// The provided `state` must have a fully up-to-date pubkey cache. 
-fn validator_response_by_pubkey( - state: &BeaconState, - validator_pubkey: PublicKeyBytes, -) -> Result { - let validator_index_opt = state - .get_validator_index(&validator_pubkey) - .map_err(|e| ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)))?; - - if let Some(validator_index) = validator_index_opt { - let balance = state.balances.get(validator_index).ok_or_else(|| { - ApiError::ServerError(format!("Invalid balances index: {:?}", validator_index)) - })?; - - let validator = state - .validators - .get(validator_index) - .ok_or_else(|| { - ApiError::ServerError(format!("Invalid validator index: {:?}", validator_index)) - })? - .clone(); - - Ok(ValidatorResponse { - pubkey: validator_pubkey, - validator_index: Some(validator_index), - balance: Some(*balance), - validator: Some(validator), - }) - } else { - Ok(ValidatorResponse { - pubkey: validator_pubkey, - validator_index: None, - balance: None, - validator: None, - }) - } -} - -/// HTTP handler -pub fn get_committees( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let mut state = - get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(|e| { - ApiError::ServerError(format!("Failed to get state suitable for epoch: {:?}", e)) - })?; - - state - .build_committee_cache(relative_epoch, &ctx.beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - - Ok(state - .get_beacon_committees_at_epoch(relative_epoch) - .map_err(|e| ApiError::ServerError(format!("Unable to get all committees: {:?}", e)))? - .into_iter() - .map(|c| Committee { - slot: c.slot, - index: c.index, - committee: c.committee.to_vec(), - }) - .collect::>()) -} - -/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. 
-/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_state( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let head_state = ctx.beacon_chain.head()?.beacon_state; - - let (key, value) = match UrlQuery::from_request(&req) { - Ok(query) => { - // We have *some* parameters, just check them. - let query_params = ["root", "slot"]; - query.first_of(&query_params)? - } - Err(ApiError::BadRequest(_)) => { - // No parameters provided at all, use current slot. - (String::from("slot"), head_state.slot.to_string()) - } - Err(e) => { - return Err(e); - } - }; - - let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { - ("slot", value) => state_at_slot(&ctx.beacon_chain, parse_slot(&value)?)?, - ("root", value) => { - let root = &parse_root(&value)?; - - let state = ctx - .beacon_chain - .store - .get_state(root, None)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; - - (*root, state) - } - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), - }; - - Ok(StateResponse { - root, - beacon_state: state, - }) -} - -/// HTTP handler to return a `BeaconState` root at a given `slot`. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_state_root( - req: Request>, - ctx: Arc>, -) -> Result { - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let slot = parse_slot(&slot_string)?; - - state_root_at_slot(&ctx.beacon_chain, slot, StateSkipConfig::WithStateRoots) -} - -/// HTTP handler to return a `BeaconState` at the genesis block. -/// -/// This is an undocumented convenience method used during testing. For production, simply do a -/// state request at slot 0. 
-pub fn get_genesis_state( - ctx: Arc>, -) -> Result, ApiError> { - state_at_slot(&ctx.beacon_chain, Slot::new(0)).map(|(_root, state)| state) -} - -pub fn proposer_slashing( - req: Request>, - ctx: Arc>, -) -> Result { - let body = req.into_body(); - - serde_json::from_slice::(&body) - .map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e)) - .and_then(move |proposer_slashing| { - if ctx.beacon_chain.eth1_chain.is_some() { - let obs_outcome = ctx - .beacon_chain - .verify_proposer_slashing_for_gossip(proposer_slashing) - .map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?; - if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome { - ctx.beacon_chain - .import_proposer_slashing(verified_proposer_slashing); - Ok(()) - } else { - Err("Proposer slashing for that validator index already known".into()) - } - } else { - Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string()) - } - }) - .map_err(ApiError::BadRequest)?; - - Ok(true) -} - -pub fn attester_slashing( - req: Request>, - ctx: Arc>, -) -> Result { - let body = req.into_body(); - serde_json::from_slice::>(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into AttesterSlashing: {:?}", - e - )) - }) - .and_then(move |attester_slashing| { - if ctx.beacon_chain.eth1_chain.is_some() { - ctx.beacon_chain - .verify_attester_slashing_for_gossip(attester_slashing) - .map_err(|e| format!("Error while verifying attester slashing: {:?}", e)) - .and_then(|outcome| { - if let ObservationOutcome::New(verified_attester_slashing) = outcome { - ctx.beacon_chain - .import_attester_slashing(verified_attester_slashing) - .map_err(|e| { - format!("Error while importing attester slashing: {:?}", e) - }) - } else { - Err("Attester slashing only covers already slashed indices".to_string()) - } - }) - .map_err(ApiError::BadRequest) - } else { - Err(ApiError::BadRequest( - "Cannot insert attester slashing on node without 
Eth1 connection.".to_string(), - )) - } - })?; - - Ok(true) -} diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs deleted file mode 100644 index 815fccfd01c..00000000000 --- a/beacon_node/rest_api/src/config.rs +++ /dev/null @@ -1,55 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::net::Ipv4Addr; - -/// Defines the encoding for the API. -#[derive(Clone, Serialize, Deserialize, Copy)] -pub enum ApiEncodingFormat { - JSON, - YAML, - SSZ, -} - -impl ApiEncodingFormat { - pub fn get_content_type(&self) -> &str { - match self { - ApiEncodingFormat::JSON => "application/json", - ApiEncodingFormat::YAML => "application/yaml", - ApiEncodingFormat::SSZ => "application/ssz", - } - } -} - -impl From<&str> for ApiEncodingFormat { - fn from(f: &str) -> ApiEncodingFormat { - match f { - "application/yaml" => ApiEncodingFormat::YAML, - "application/ssz" => ApiEncodingFormat::SSZ, - _ => ApiEncodingFormat::JSON, - } - } -} - -/// HTTP REST API Configuration -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - /// Enable the REST API server. - pub enabled: bool, - /// The IPv4 address the REST API HTTP server will listen on. - pub listen_address: Ipv4Addr, - /// The port the REST API HTTP server will listen on. - pub port: u16, - /// If something else than "", a 'Access-Control-Allow-Origin' header will be present in - /// responses. Put *, to allow any origin. 
- pub allow_origin: String, -} - -impl Default for Config { - fn default() -> Self { - Config { - enabled: false, - listen_address: Ipv4Addr::new(127, 0, 0, 1), - port: 5052, - allow_origin: "".to_string(), - } - } -} diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs deleted file mode 100644 index d82b05b7a7f..00000000000 --- a/beacon_node/rest_api/src/consensus.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::helpers::*; -use crate::{ApiError, Context, UrlQuery}; -use beacon_chain::BeaconChainTypes; -use hyper::Request; -use rest_types::{IndividualVotesRequest, IndividualVotesResponse}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses}; -use std::sync::Arc; -use types::EthSpec; - -/// The results of validators voting during an epoch. -/// -/// Provides information about the current and previous epochs. -#[derive(Serialize, Deserialize, Encode, Decode)] -pub struct VoteCount { - /// The total effective balance of all active validators during the _current_ epoch. - pub current_epoch_active_gwei: u64, - /// The total effective balance of all active validators during the _previous_ epoch. - pub previous_epoch_active_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch. - pub current_epoch_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch and - /// agreed with the state about the beacon block at the first slot of the _current_ epoch. - pub current_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch. - pub previous_epoch_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch and - /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. 
- pub previous_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch and - /// agreed with the state about the beacon block at the time of attestation. - pub previous_epoch_head_attesting_gwei: u64, -} - -impl Into for TotalBalances { - fn into(self) -> VoteCount { - VoteCount { - current_epoch_active_gwei: self.current_epoch(), - previous_epoch_active_gwei: self.previous_epoch(), - current_epoch_attesting_gwei: self.current_epoch_attesters(), - current_epoch_target_attesting_gwei: self.current_epoch_target_attesters(), - previous_epoch_attesting_gwei: self.previous_epoch_attesters(), - previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters(), - previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters(), - } - } -} - -/// HTTP handler return a `VoteCount` for some given `Epoch`. -pub fn get_vote_count( - req: Request>, - ctx: Arc>, -) -> Result { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - // This is the last slot of the given epoch (one prior to the first slot of the next epoch). - let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - - let (_root, state) = state_at_slot(&ctx.beacon_chain, target_slot)?; - let spec = &ctx.beacon_chain.spec; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - Ok(validator_statuses.total_balances.into()) -} - -pub fn post_individual_votes( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let body = req.into_body(); - - serde_json::from_slice::(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) - .and_then(move |body| { - let epoch = body.epoch; - - // This is the last slot of the given epoch (one prior to the first slot of the next epoch). 
- let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - - let (_root, mut state) = state_at_slot(&ctx.beacon_chain, target_slot)?; - let spec = &ctx.beacon_chain.spec; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - state.update_pubkey_cache().map_err(|e| { - ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)) - })?; - - body.pubkeys - .into_iter() - .map(|pubkey| { - let validator_index_opt = state.get_validator_index(&pubkey).map_err(|e| { - ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)) - })?; - - if let Some(validator_index) = validator_index_opt { - let vote = validator_statuses - .statuses - .get(validator_index) - .cloned() - .map(Into::into); - - Ok(IndividualVotesResponse { - epoch, - pubkey, - validator_index: Some(validator_index), - vote, - }) - } else { - Ok(IndividualVotesResponse { - epoch, - pubkey, - validator_index: None, - vote: None, - }) - } - }) - .collect::, _>>() - }) -} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs deleted file mode 100644 index 66b5bd1a0c1..00000000000 --- a/beacon_node/rest_api/src/helpers.rs +++ /dev/null @@ -1,260 +0,0 @@ -use crate::{ApiError, NetworkChannel}; -use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use bls::PublicKeyBytes; -use eth2_libp2p::PubsubMessage; -use itertools::process_results; -use network::NetworkMessage; -use ssz::Decode; -use store::iter::AncestorIter; -use types::{ - BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot, -}; - -/// Parse a slot. -/// -/// E.g., `"1234"` -pub fn parse_slot(string: &str) -> Result { - string - .parse::() - .map(Slot::from) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e))) -} - -/// Parse an epoch. 
-/// -/// E.g., `"13"` -pub fn parse_epoch(string: &str) -> Result { - string - .parse::() - .map(Epoch::from) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse epoch: {:?}", e))) -} - -/// Parse a CommitteeIndex. -/// -/// E.g., `"18"` -pub fn parse_committee_index(string: &str) -> Result { - string - .parse::() - .map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e))) -} - -/// Parse an SSZ object from some hex-encoded bytes. -/// -/// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"` -pub fn parse_hex_ssz_bytes(string: &str) -> Result { - const PREFIX: &str = "0x"; - - if string.starts_with(PREFIX) { - let trimmed = string.trim_start_matches(PREFIX); - let bytes = hex::decode(trimmed) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ hex: {:?}", e)))?; - T::from_ssz_bytes(&bytes) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ bytes: {:?}", e))) - } else { - Err(ApiError::BadRequest( - "Hex bytes must have a 0x prefix".to_string(), - )) - } -} - -/// Parse a root from a `0x` prefixed string. 
-/// -/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"` -pub fn parse_root(string: &str) -> Result { - const PREFIX: &str = "0x"; - - if string.starts_with(PREFIX) { - let trimmed = string.trim_start_matches(PREFIX); - trimmed - .parse() - .map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e))) - } else { - Err(ApiError::BadRequest( - "Root must have a 0x prefix".to_string(), - )) - } -} - -/// Parse a PublicKey from a `0x` prefixed hex string -pub fn parse_pubkey_bytes(string: &str) -> Result { - const PREFIX: &str = "0x"; - if string.starts_with(PREFIX) { - let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX)) - .map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?; - let pubkey = PublicKeyBytes::deserialize(pubkey_bytes.as_slice()).map_err(|e| { - ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e)) - })?; - Ok(pubkey) - } else { - Err(ApiError::BadRequest( - "Public key must have a 0x prefix".to_string(), - )) - } -} - -/// Returns the root of the `SignedBeaconBlock` in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// May return a root for a previous slot, in the case of skip slots. -pub fn block_root_at_slot( - beacon_chain: &BeaconChain, - target: Slot, -) -> Result, ApiError> { - Ok(process_results( - beacon_chain.rev_iter_block_roots()?, - |iter| { - iter.take_while(|(_, slot)| *slot >= target) - .find(|(_, slot)| *slot == target) - .map(|(root, _)| root) - }, - )?) -} - -/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. 
-pub fn state_at_slot( - beacon_chain: &BeaconChain, - slot: Slot, -) -> Result<(Hash256, BeaconState), ApiError> { - let head = beacon_chain.head()?; - - if head.beacon_state.slot == slot { - Ok((head.beacon_state_root, head.beacon_state)) - } else { - let root = state_root_at_slot(beacon_chain, slot, StateSkipConfig::WithStateRoots)?; - - let state: BeaconState = beacon_chain - .store - .get_state(&root, Some(slot))? - .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?; - - Ok((root, state)) - } -} - -/// Returns the root of the `BeaconState` in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// Will not return a state root if the request slot is in the future. Will return state roots -/// higher than the current head by skipping slots. -pub fn state_root_at_slot( - beacon_chain: &BeaconChain, - slot: Slot, - config: StateSkipConfig, -) -> Result { - let head_state = &beacon_chain.head()?.beacon_state; - let current_slot = beacon_chain - .slot() - .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; - - // There are four scenarios when obtaining a state for a given slot: - // - // 1. The request slot is in the future. - // 2. The request slot is the same as the best block (head) slot. - // 3. The request slot is prior to the head slot. - // 4. The request slot is later than the head slot. - if current_slot < slot { - // 1. The request slot is in the future. Reject the request. - // - // We could actually speculate about future state roots by skipping slots, however that's - // likely to cause confusion for API users. - Err(ApiError::BadRequest(format!( - "Requested slot {} is past the current slot {}", - slot, current_slot - ))) - } else if head_state.slot == slot { - // 2. The request slot is the same as the best block (head) slot. - // - // The head state root is stored in memory, return a reference. 
- Ok(beacon_chain.head()?.beacon_state_root) - } else if head_state.slot > slot { - // 3. The request slot is prior to the head slot. - // - // Iterate through the state roots on the head state to find the root for that - // slot. Once the root is found, load it from the database. - process_results( - head_state - .try_iter_ancestor_roots(beacon_chain.store.clone()) - .ok_or_else(|| { - ApiError::ServerError("Failed to create roots iterator".to_string()) - })?, - |mut iter| iter.find(|(_, s)| *s == slot).map(|(root, _)| root), - )? - .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at slot {}", slot))) - } else { - // 4. The request slot is later than the head slot. - // - // Use `per_slot_processing` to advance the head state to the present slot, - // assuming that all slots do not contain a block (i.e., they are skipped slots). - let mut state = beacon_chain.head()?.beacon_state; - let spec = &T::EthSpec::default_spec(); - - let skip_state_root = match config { - StateSkipConfig::WithStateRoots => None, - StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()), - }; - - for _ in state.slot.as_u64()..slot.as_u64() { - // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - state_processing::per_slot_processing(&mut state, skip_state_root, spec)?; - } - - // Note: this is an expensive operation. Once the tree hash cache is implement it may be - // used here. - Ok(state.canonical_root()) - } -} - -pub fn publish_beacon_block_to_network( - chan: &NetworkChannel, - block: SignedBeaconBlock, -) -> Result<(), ApiError> { - // send the block via SSZ encoding - let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))]; - - // Publish the block to the p2p network via gossipsub. 
- if let Err(e) = chan.send(NetworkMessage::Publish { messages }) { - return Err(ApiError::ServerError(format!( - "Unable to send new block to network: {:?}", - e - ))); - } - - Ok(()) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn parse_root_works() { - assert_eq!( - parse_root("0x0000000000000000000000000000000000000000000000000000000000000000"), - Ok(Hash256::zero()) - ); - assert_eq!( - parse_root("0x000000000000000000000000000000000000000000000000000000000000002a"), - Ok(Hash256::from_low_u64_be(42)) - ); - assert!( - parse_root("0000000000000000000000000000000000000000000000000000000000000042").is_err() - ); - assert!(parse_root("0x").is_err()); - assert!(parse_root("0x00").is_err()); - } - - #[test] - fn parse_slot_works() { - assert_eq!(parse_slot("0"), Ok(Slot::new(0))); - assert_eq!(parse_slot("42"), Ok(Slot::new(42))); - assert_eq!(parse_slot("10000000"), Ok(Slot::new(10_000_000))); - assert!(parse_slot("cats").is_err()); - } -} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs deleted file mode 100644 index 405e08e217d..00000000000 --- a/beacon_node/rest_api/src/lib.rs +++ /dev/null @@ -1,127 +0,0 @@ -#[macro_use] -extern crate lazy_static; -mod router; -extern crate network as client_network; - -mod beacon; -pub mod config; -mod consensus; -mod helpers; -mod lighthouse; -mod metrics; -mod node; -mod url_query; -mod validator; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bus::Bus; -use client_network::NetworkMessage; -pub use config::ApiEncodingFormat; -use eth2_config::Eth2Config; -use eth2_libp2p::NetworkGlobals; -use futures::future::TryFutureExt; -use hyper::server::conn::AddrStream; -use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Request, Server}; -use parking_lot::Mutex; -use rest_types::ApiError; -use slog::{info, warn}; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::sync::Arc; -use tokio::sync::mpsc; -use types::SignedBeaconBlockHash; -use 
url_query::UrlQuery; - -pub use crate::helpers::parse_pubkey_bytes; -pub use config::Config; -pub use router::Context; - -pub type NetworkChannel = mpsc::UnboundedSender>; - -pub struct NetworkInfo { - pub network_globals: Arc>, - pub network_chan: NetworkChannel, -} - -// Allowing more than 7 arguments. -#[allow(clippy::too_many_arguments)] -pub fn start_server( - executor: environment::TaskExecutor, - config: &Config, - beacon_chain: Arc>, - network_info: NetworkInfo, - db_path: PathBuf, - freezer_db_path: PathBuf, - eth2_config: Eth2Config, - events: Arc>>, -) -> Result { - let log = executor.log(); - let eth2_config = Arc::new(eth2_config); - - let context = Arc::new(Context { - executor: executor.clone(), - config: config.clone(), - beacon_chain, - network_globals: network_info.network_globals.clone(), - network_chan: network_info.network_chan, - eth2_config, - log: log.clone(), - db_path, - freezer_db_path, - events, - }); - - // Define the function that will build the request handler. - let make_service = make_service_fn(move |_socket: &AddrStream| { - let ctx = context.clone(); - - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - router::on_http_request(req, ctx.clone()) - })) - } - }); - - let bind_addr = (config.listen_address, config.port).into(); - let server = Server::bind(&bind_addr).serve(make_service); - - // Determine the address the server is actually listening on. - // - // This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free - // port). - let actual_listen_addr = server.local_addr(); - - // Build a channel to kill the HTTP server. - let exit = executor.exit(); - let inner_log = log.clone(); - let server_exit = async move { - let _ = exit.await; - info!(inner_log, "HTTP service shutdown"); - }; - - // Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered. 
- let inner_log = log.clone(); - let server_future = server - .with_graceful_shutdown(async { - server_exit.await; - }) - .map_err(move |e| { - warn!( - inner_log, - "HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e) - ) - }) - .unwrap_or_else(|_| ()); - - info!( - log, - "HTTP API started"; - "address" => format!("{}", actual_listen_addr.ip()), - "port" => actual_listen_addr.port(), - ); - - executor.spawn_without_exit(server_future, "http"); - - Ok(actual_listen_addr) -} diff --git a/beacon_node/rest_api/src/lighthouse.rs b/beacon_node/rest_api/src/lighthouse.rs deleted file mode 100644 index 4d0fae926df..00000000000 --- a/beacon_node/rest_api/src/lighthouse.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! This contains a collection of lighthouse specific HTTP endpoints. - -use crate::{ApiError, Context}; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::PeerInfo; -use serde::Serialize; -use std::sync::Arc; -use types::EthSpec; - -/// Returns all known peers and corresponding information -pub fn peers(ctx: Arc>) -> Result>, ApiError> { - Ok(ctx - .network_globals - .peers - .read() - .peers() - .map(|(peer_id, peer_info)| Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect()) -} - -/// Returns all known connected peers and their corresponding information -pub fn connected_peers( - ctx: Arc>, -) -> Result>, ApiError> { - Ok(ctx - .network_globals - .peers - .read() - .connected_peers() - .map(|(peer_id, peer_info)| Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect()) -} - -/// Information returned by `peers` and `connected_peers`. -#[derive(Clone, Debug, Serialize)] -#[serde(bound = "T: EthSpec")] -pub struct Peer { - /// The Peer's ID - peer_id: String, - /// The PeerInfo associated with the peer. 
- peer_info: PeerInfo, -} diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs deleted file mode 100644 index bd5615de34d..00000000000 --- a/beacon_node/rest_api/src/node.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::{ApiError, Context}; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::types::SyncState; -use rest_types::{SyncingResponse, SyncingStatus}; -use std::sync::Arc; -use types::Slot; - -/// Returns a syncing status. -pub fn syncing(ctx: Arc>) -> Result { - let current_slot = ctx - .beacon_chain - .head_info() - .map_err(|e| ApiError::ServerError(format!("Unable to read head slot: {:?}", e)))? - .slot; - - let (starting_slot, highest_slot) = match ctx.network_globals.sync_state() { - SyncState::SyncingFinalized { - start_slot, - head_slot, - .. - } - | SyncState::SyncingHead { - start_slot, - head_slot, - } => (start_slot, head_slot), - SyncState::Synced | SyncState::Stalled => (Slot::from(0u64), current_slot), - }; - - let sync_status = SyncingStatus { - starting_slot, - current_slot, - highest_slot, - }; - - Ok(SyncingResponse { - is_syncing: ctx.network_globals.is_syncing(), - sync_status, - }) -} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs deleted file mode 100644 index bed7ba77aa6..00000000000 --- a/beacon_node/rest_api/src/router.rs +++ /dev/null @@ -1,322 +0,0 @@ -use crate::{ - beacon, config::Config, consensus, lighthouse, metrics, node, validator, NetworkChannel, -}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bus::Bus; -use environment::TaskExecutor; -use eth2_config::Eth2Config; -use eth2_libp2p::{NetworkGlobals, PeerId}; -use hyper::header::HeaderValue; -use hyper::{Body, Method, Request, Response}; -use lighthouse_version::version_with_platform; -use operation_pool::PersistedOperationPool; -use parking_lot::Mutex; -use rest_types::{ApiError, Handler, Health}; -use slog::debug; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Instant; -use 
types::{EthSpec, SignedBeaconBlockHash}; - -pub struct Context { - pub executor: TaskExecutor, - pub config: Config, - pub beacon_chain: Arc>, - pub network_globals: Arc>, - pub network_chan: NetworkChannel, - pub eth2_config: Arc, - pub log: slog::Logger, - pub db_path: PathBuf, - pub freezer_db_path: PathBuf, - pub events: Arc>>, -} - -pub async fn on_http_request( - req: Request, - ctx: Arc>, -) -> Result, ApiError> { - let path = req.uri().path().to_string(); - - let _timer = metrics::start_timer_vec(&metrics::BEACON_HTTP_API_TIMES_TOTAL, &[&path]); - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_REQUESTS_TOTAL, &[&path]); - - let received_instant = Instant::now(); - let log = ctx.log.clone(); - let allow_origin = ctx.config.allow_origin.clone(); - - match route(req, ctx).await { - Ok(mut response) => { - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_SUCCESS_TOTAL, &[&path]); - - if allow_origin != "" { - let headers = response.headers_mut(); - headers.insert( - hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, - HeaderValue::from_str(&allow_origin)?, - ); - headers.insert(hyper::header::VARY, HeaderValue::from_static("Origin")); - } - - debug!( - log, - "HTTP API request successful"; - "path" => path, - "duration_ms" => Instant::now().duration_since(received_instant).as_millis() - ); - Ok(response) - } - - Err(error) => { - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_ERROR_TOTAL, &[&path]); - - debug!( - log, - "HTTP API request failure"; - "path" => path, - "duration_ms" => Instant::now().duration_since(received_instant).as_millis() - ); - Ok(error.into()) - } - } -} - -async fn route( - req: Request, - ctx: Arc>, -) -> Result, ApiError> { - let path = req.uri().path().to_string(); - let ctx = ctx.clone(); - let method = req.method().clone(); - let executor = ctx.executor.clone(); - let handler = Handler::new(req, ctx, executor)?; - - match (method, path.as_ref()) { - (Method::GET, "/node/version") => handler - 
.static_value(version_with_platform()) - .await? - .serde_encodings(), - (Method::GET, "/node/health") => handler - .static_value(Health::observe().map_err(ApiError::ServerError)?) - .await? - .serde_encodings(), - (Method::GET, "/node/syncing") => handler - .allow_body() - .in_blocking_task(|_, ctx| node::syncing(ctx)) - .await? - .serde_encodings(), - (Method::GET, "/network/enr") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.local_enr().to_base64())) - .await? - .serde_encodings(), - (Method::GET, "/network/peer_count") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.connected_peers())) - .await? - .serde_encodings(), - (Method::GET, "/network/peer_id") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.local_peer_id().to_base58())) - .await? - .serde_encodings(), - (Method::GET, "/network/peers") => handler - .in_blocking_task(|_, ctx| { - Ok(ctx - .network_globals - .peers - .read() - .connected_peer_ids() - .map(PeerId::to_string) - .collect::>()) - }) - .await? - .serde_encodings(), - (Method::GET, "/network/listen_port") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.listen_port_tcp())) - .await? - .serde_encodings(), - (Method::GET, "/network/listen_addresses") => handler - .in_blocking_task(|_, ctx| Ok(ctx.network_globals.listen_multiaddrs())) - .await? - .serde_encodings(), - (Method::GET, "/beacon/head") => handler - .in_blocking_task(|_, ctx| beacon::get_head(ctx)) - .await? - .all_encodings(), - (Method::GET, "/beacon/heads") => handler - .in_blocking_task(|_, ctx| Ok(beacon::get_heads(ctx))) - .await? - .all_encodings(), - (Method::GET, "/beacon/block") => handler - .in_blocking_task(beacon::get_block) - .await? - .all_encodings(), - (Method::GET, "/beacon/block_root") => handler - .in_blocking_task(beacon::get_block_root) - .await? - .all_encodings(), - (Method::GET, "/beacon/fork") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.fork)) - .await? 
- .all_encodings(), - (Method::GET, "/beacon/fork/stream") => { - handler.sse_stream(|_, ctx| beacon::stream_forks(ctx)).await - } - (Method::GET, "/beacon/genesis_time") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_time)) - .await? - .all_encodings(), - (Method::GET, "/beacon/genesis_validators_root") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_validators_root)) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators") => handler - .in_blocking_task(beacon::get_validators) - .await? - .all_encodings(), - (Method::POST, "/beacon/validators") => handler - .allow_body() - .in_blocking_task(beacon::post_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators/all") => handler - .in_blocking_task(beacon::get_all_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators/active") => handler - .in_blocking_task(beacon::get_active_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/state") => handler - .in_blocking_task(beacon::get_state) - .await? - .all_encodings(), - (Method::GET, "/beacon/state_root") => handler - .in_blocking_task(beacon::get_state_root) - .await? - .all_encodings(), - (Method::GET, "/beacon/state/genesis") => handler - .in_blocking_task(|_, ctx| beacon::get_genesis_state(ctx)) - .await? - .all_encodings(), - (Method::GET, "/beacon/committees") => handler - .in_blocking_task(beacon::get_committees) - .await? - .all_encodings(), - (Method::POST, "/beacon/proposer_slashing") => handler - .allow_body() - .in_blocking_task(beacon::proposer_slashing) - .await? - .serde_encodings(), - (Method::POST, "/beacon/attester_slashing") => handler - .allow_body() - .in_blocking_task(beacon::attester_slashing) - .await? - .serde_encodings(), - (Method::POST, "/validator/duties") => handler - .allow_body() - .in_blocking_task(validator::post_validator_duties) - .await? 
- .serde_encodings(), - (Method::POST, "/validator/subscribe") => handler - .allow_body() - .in_blocking_task(validator::post_validator_subscriptions) - .await? - .serde_encodings(), - (Method::GET, "/validator/duties/all") => handler - .in_blocking_task(validator::get_all_validator_duties) - .await? - .serde_encodings(), - (Method::GET, "/validator/duties/active") => handler - .in_blocking_task(validator::get_active_validator_duties) - .await? - .serde_encodings(), - (Method::GET, "/validator/block") => handler - .in_blocking_task(validator::get_new_beacon_block) - .await? - .serde_encodings(), - (Method::POST, "/validator/block") => handler - .allow_body() - .in_blocking_task(validator::publish_beacon_block) - .await? - .serde_encodings(), - (Method::GET, "/validator/attestation") => handler - .in_blocking_task(validator::get_new_attestation) - .await? - .serde_encodings(), - (Method::GET, "/validator/aggregate_attestation") => handler - .in_blocking_task(validator::get_aggregate_attestation) - .await? - .serde_encodings(), - (Method::POST, "/validator/attestations") => handler - .allow_body() - .in_blocking_task(validator::publish_attestations) - .await? - .serde_encodings(), - (Method::POST, "/validator/aggregate_and_proofs") => handler - .allow_body() - .in_blocking_task(validator::publish_aggregate_and_proofs) - .await? - .serde_encodings(), - (Method::GET, "/consensus/global_votes") => handler - .allow_body() - .in_blocking_task(consensus::get_vote_count) - .await? - .serde_encodings(), - (Method::POST, "/consensus/individual_votes") => handler - .allow_body() - .in_blocking_task(consensus::post_individual_votes) - .await? - .serde_encodings(), - (Method::GET, "/spec") => handler - // TODO: this clone is not ideal. - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.spec.clone())) - .await? - .serde_encodings(), - (Method::GET, "/spec/slots_per_epoch") => handler - .static_value(T::EthSpec::slots_per_epoch()) - .await? 
- .serde_encodings(), - (Method::GET, "/spec/eth2_config") => handler - // TODO: this clone is not ideal. - .in_blocking_task(|_, ctx| Ok(ctx.eth2_config.as_ref().clone())) - .await? - .serde_encodings(), - (Method::GET, "/advanced/fork_choice") => handler - .in_blocking_task(|_, ctx| { - Ok(ctx - .beacon_chain - .fork_choice - .read() - .proto_array() - .core_proto_array() - .clone()) - }) - .await? - .serde_encodings(), - (Method::GET, "/advanced/operation_pool") => handler - .in_blocking_task(|_, ctx| { - Ok(PersistedOperationPool::from_operation_pool( - &ctx.beacon_chain.op_pool, - )) - }) - .await? - .serde_encodings(), - (Method::GET, "/metrics") => handler - .in_blocking_task(|_, ctx| metrics::get_prometheus(ctx)) - .await? - .text_encoding(), - (Method::GET, "/lighthouse/syncing") => handler - .in_blocking_task(|_, ctx| Ok(ctx.network_globals.sync_state())) - .await? - .serde_encodings(), - (Method::GET, "/lighthouse/peers") => handler - .in_blocking_task(|_, ctx| lighthouse::peers(ctx)) - .await? - .serde_encodings(), - (Method::GET, "/lighthouse/connected_peers") => handler - .in_blocking_task(|_, ctx| lighthouse::connected_peers(ctx)) - .await? - .serde_encodings(), - _ => Err(ApiError::NotFound( - "Request path and/or method not found.".to_owned(), - )), - } -} diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs deleted file mode 100644 index fee0cf437e6..00000000000 --- a/beacon_node/rest_api/src/url_query.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::helpers::{parse_committee_index, parse_epoch, parse_hex_ssz_bytes, parse_slot}; -use crate::ApiError; -use hyper::Request; -use types::{AttestationData, CommitteeIndex, Epoch, Signature, Slot}; - -/// Provides handy functions for parsing the query parameters of a URL. - -#[derive(Clone, Copy)] -pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>); - -impl<'a> UrlQuery<'a> { - /// Instantiate from an existing `Request`. 
- /// - /// Returns `Err` if `req` does not contain any query parameters. - pub fn from_request(req: &'a Request) -> Result { - let query_str = req.uri().query().unwrap_or_else(|| ""); - - Ok(UrlQuery(url::form_urlencoded::parse(query_str.as_bytes()))) - } - - /// Returns the first `(key, value)` pair found where the `key` is in `keys`. - /// - /// If no match is found, an `InvalidQueryParams` error is returned. - pub fn first_of(mut self, keys: &[&str]) -> Result<(String, String), ApiError> { - self.0 - .find(|(key, _value)| keys.contains(&&**key)) - .map(|(key, value)| (key.into_owned(), value.into_owned())) - .ok_or_else(|| { - ApiError::BadRequest(format!( - "URL query must be valid and contain at least one of the following keys: {:?}", - keys - )) - }) - } - - /// Returns the first `(key, value)` pair found where the `key` is in `keys`, if any. - /// - /// Returns `None` if no match is found. - pub fn first_of_opt(mut self, keys: &[&str]) -> Option<(String, String)> { - self.0 - .find(|(key, _value)| keys.contains(&&**key)) - .map(|(key, value)| (key.into_owned(), value.into_owned())) - } - - /// Returns the value for `key`, if and only if `key` is the only key present in the query - /// parameters. - pub fn only_one(self, key: &str) -> Result { - let queries: Vec<_> = self - .0 - .map(|(k, v)| (k.into_owned(), v.into_owned())) - .collect(); - - if queries.len() == 1 { - let (first_key, first_value) = &queries[0]; // Must have 0 index if len is 1. - if first_key == key { - Ok(first_value.to_string()) - } else { - Err(ApiError::BadRequest(format!( - "Only the {} query parameter is supported", - key - ))) - } - } else { - Err(ApiError::BadRequest(format!( - "Only one query parameter is allowed, {} supplied", - queries.len() - ))) - } - } - - /// Returns a vector of all values present where `key` is in `keys - /// - /// If no match is found, an `InvalidQueryParams` error is returned. 
- pub fn all_of(self, key: &str) -> Result, ApiError> { - let queries: Vec<_> = self - .0 - .filter_map(|(k, v)| { - if k.eq(key) { - Some(v.into_owned()) - } else { - None - } - }) - .collect(); - Ok(queries) - } - - /// Returns the value of the first occurrence of the `epoch` key. - pub fn epoch(self) -> Result { - self.first_of(&["epoch"]) - .and_then(|(_key, value)| parse_epoch(&value)) - } - - /// Returns the value of the first occurrence of the `slot` key. - pub fn slot(self) -> Result { - self.first_of(&["slot"]) - .and_then(|(_key, value)| parse_slot(&value)) - } - - /// Returns the value of the first occurrence of the `committee_index` key. - pub fn committee_index(self) -> Result { - self.first_of(&["committee_index"]) - .and_then(|(_key, value)| parse_committee_index(&value)) - } - - /// Returns the value of the first occurrence of the `randao_reveal` key. - pub fn randao_reveal(self) -> Result { - self.first_of(&["randao_reveal"]) - .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) - } - - /// Returns the value of the first occurrence of the `attestation_data` key. 
- pub fn attestation_data(self) -> Result { - self.first_of(&["attestation_data"]) - .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn only_one() { - let get_result = |addr: &str, key: &str| -> Result { - UrlQuery(url::Url::parse(addr).unwrap().query_pairs()).only_one(key) - }; - - assert_eq!(get_result("http://cat.io/?a=42", "a"), Ok("42".to_string())); - assert!(get_result("http://cat.io/?a=42", "b").is_err()); - assert!(get_result("http://cat.io/?a=42&b=12", "a").is_err()); - assert!(get_result("http://cat.io/", "").is_err()); - } - - #[test] - fn first_of() { - let url = url::Url::parse("http://lighthouse.io/cats?a=42&b=12&c=100").unwrap(); - let get_query = || UrlQuery(url.query_pairs()); - - assert_eq!( - get_query().first_of(&["a"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "b", "c"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "a", "a"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "b", "c"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["b", "c"]), - Ok(("b".to_string(), "12".to_string())) - ); - assert_eq!( - get_query().first_of(&["c"]), - Ok(("c".to_string(), "100".to_string())) - ); - assert!(get_query().first_of(&["nothing"]).is_err()); - } -} diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs deleted file mode 100644 index e1c3c37dbf7..00000000000 --- a/beacon_node/rest_api/src/validator.rs +++ /dev/null @@ -1,750 +0,0 @@ -use crate::helpers::{parse_hex_ssz_bytes, publish_beacon_block_to_network}; -use crate::{ApiError, Context, NetworkChannel, UrlQuery}; -use beacon_chain::{ - attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - BlockError, ForkChoiceError, StateSkipConfig, -}; -use bls::PublicKeyBytes; -use 
eth2_libp2p::PubsubMessage; -use hyper::Request; -use network::NetworkMessage; -use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription}; -use slog::{error, info, trace, warn, Logger}; -use std::sync::Arc; -use types::beacon_state::EthSpec; -use types::{ - Attestation, AttestationData, BeaconBlock, BeaconState, Epoch, RelativeEpoch, SelectionProof, - SignedAggregateAndProof, SignedBeaconBlock, SubnetId, -}; - -/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This -/// method allows for collecting bulk sets of validator duties without risking exceeding the max -/// URL length with query pairs. -pub fn post_validator_duties( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let body = req.into_body(); - - serde_json::from_slice::(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) - .and_then(|bulk_request| { - return_validator_duties( - &ctx.beacon_chain.clone(), - bulk_request.epoch, - bulk_request.pubkeys.into_iter().map(Into::into).collect(), - ) - }) -} - -/// HTTP Handler to retrieve subscriptions for a set of validators. This allows the node to -/// organise peer discovery and topic subscription for known validators. -pub fn post_validator_subscriptions( - req: Request>, - ctx: Arc>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorSubscriptions: {:?}", - e - )) - }) - .and_then(move |subscriptions: Vec| { - ctx.network_chan - .send(NetworkMessage::Subscribe { subscriptions }) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to subscriptions to the network: {:?}", - e - )) - })?; - Ok(()) - }) -} - -/// HTTP Handler to retrieve all validator duties for the given epoch. 
-pub fn get_all_validator_duties( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let validator_pubkeys = state - .validators - .iter() - .map(|validator| validator.pubkey.clone()) - .collect(); - - return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys) -} - -/// HTTP Handler to retrieve all active validator duties for the given epoch. -pub fn get_active_validator_duties( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let validator_pubkeys = state - .validators - .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())) - .map(|validator| validator.pubkey.clone()) - .collect(); - - return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys) -} - -/// Helper function to return the state that can be used to determine the duties for some `epoch`. -pub fn get_state_for_epoch( - beacon_chain: &BeaconChain, - epoch: Epoch, - config: StateSkipConfig, -) -> Result, ApiError> { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let head = beacon_chain.head()?; - let current_epoch = beacon_chain.epoch()?; - let head_epoch = head.beacon_state.current_epoch(); - - if head_epoch == current_epoch && RelativeEpoch::from_epoch(current_epoch, epoch).is_ok() { - Ok(head.beacon_state) - } else { - // If epoch is ahead of current epoch, then it should be a "next epoch" request for - // attestation duties. So, go to the start slot of the epoch prior to that, - // which should be just the next wall-clock epoch. - let slot = if epoch > current_epoch { - (epoch - 1).start_slot(slots_per_epoch) - } - // Otherwise, go to the start of the request epoch. 
- else { - epoch.start_slot(slots_per_epoch) - }; - - beacon_chain.state_at_slot(slot, config).map_err(|e| { - ApiError::ServerError(format!("Unable to load state for epoch {}: {:?}", epoch, e)) - }) - } -} - -/// Helper function to get the duties for some `validator_pubkeys` in some `epoch`. -fn return_validator_duties( - beacon_chain: &BeaconChain, - epoch: Epoch, - validator_pubkeys: Vec, -) -> Result, ApiError> { - let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) - .map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?; - - state - .build_committee_cache(relative_epoch, &beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - state - .update_pubkey_cache() - .map_err(|e| ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)))?; - - // Get a list of all validators for this epoch. - // - // Used for quickly determining the slot for a proposer. - let validator_proposers = if epoch == state.current_epoch() { - Some( - epoch - .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| { - state - .get_beacon_proposer_index(slot, &beacon_chain.spec) - .map(|i| (i, slot)) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to get proposer index for validator: {:?}", - e - )) - }) - }) - .collect::, _>>()?, - ) - } else { - None - }; - - validator_pubkeys - .into_iter() - .map(|validator_pubkey| { - // The `beacon_chain` can return a validator index that does not exist in all states. - // Therefore, we must check to ensure that the validator index is valid for our - // `state`. - let validator_index = beacon_chain - .validator_index(&validator_pubkey) - .map_err(|e| { - ApiError::ServerError(format!("Unable to get validator index: {:?}", e)) - })? 
- .filter(|i| *i < state.validators.len()); - - if let Some(validator_index) = validator_index { - let duties = state - .get_attestation_duties(validator_index, relative_epoch) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to obtain attestation duties: {:?}", - e - )) - })?; - - let committee_count_at_slot = duties - .map(|d| state.get_committee_count_at_slot(d.slot)) - .transpose() - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to find committee count at slot: {:?}", - e - )) - })?; - - let aggregator_modulo = duties - .map(|duties| SelectionProof::modulo(duties.committee_len, &beacon_chain.spec)) - .transpose() - .map_err(|e| { - ApiError::ServerError(format!("Unable to find modulo: {:?}", e)) - })?; - - let block_proposal_slots = validator_proposers.as_ref().map(|proposers| { - proposers - .iter() - .filter(|(i, _slot)| validator_index == *i) - .map(|(_i, slot)| *slot) - .collect() - }); - - Ok(ValidatorDutyBytes { - validator_pubkey, - validator_index: Some(validator_index as u64), - attestation_slot: duties.map(|d| d.slot), - attestation_committee_index: duties.map(|d| d.index), - committee_count_at_slot, - attestation_committee_position: duties.map(|d| d.committee_position), - block_proposal_slots, - aggregator_modulo, - }) - } else { - Ok(ValidatorDutyBytes { - validator_pubkey, - validator_index: None, - attestation_slot: None, - attestation_committee_index: None, - attestation_committee_position: None, - block_proposal_slots: None, - committee_count_at_slot: None, - aggregator_modulo: None, - }) - } - }) - .collect::, ApiError>>() -} - -/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. 
-pub fn get_new_beacon_block( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let slot = query.slot()?; - let randao_reveal = query.randao_reveal()?; - - let validator_graffiti = if let Some((_key, value)) = query.first_of_opt(&["graffiti"]) { - Some(parse_hex_ssz_bytes(&value)?) - } else { - None - }; - - let (new_block, _state) = ctx - .beacon_chain - .produce_block(randao_reveal, slot, validator_graffiti) - .map_err(|e| { - error!( - ctx.log, - "Error whilst producing block"; - "error" => format!("{:?}", e) - ); - - ApiError::ServerError(format!( - "Beacon node is not able to produce a block: {:?}", - e - )) - })?; - - Ok(new_block) -} - -/// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator. -pub fn publish_beacon_block( - req: Request>, - ctx: Arc>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body).map_err(|e| { - ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e)) - }) - .and_then(move |block: SignedBeaconBlock| { - let slot = block.slot(); - match ctx.beacon_chain.process_block(block.clone()) { - Ok(block_root) => { - // Block was processed, publish via gossipsub - info!( - ctx.log, - "Block from local validator"; - "block_root" => format!("{}", block_root), - "block_slot" => slot, - ); - - publish_beacon_block_to_network::(&ctx.network_chan, block)?; - - // Run the fork choice algorithm and enshrine a new canonical head, if - // found. - // - // The new head may or may not be the block we just received. - if let Err(e) = ctx.beacon_chain.fork_choice() { - error!( - ctx.log, - "Failed to find beacon chain head"; - "error" => format!("{:?}", e) - ); - } else { - // In the best case, validators should produce blocks that become the - // head. - // - // Potential reasons this may not be the case: - // - // - A quick re-org between block produce and publish. 
- // - Excessive time between block produce and publish. - // - A validator is using another beacon node to produce blocks and - // submitting them here. - if ctx.beacon_chain.head()?.beacon_block_root != block_root { - warn!( - ctx.log, - "Block from validator is not head"; - "desc" => "potential re-org", - ); - - } - } - - Ok(()) - } - Err(BlockError::BeaconChainError(e)) => { - error!( - ctx.log, - "Error whilst processing block"; - "error" => format!("{:?}", e) - ); - - Err(ApiError::ServerError(format!( - "Error while processing block: {:?}", - e - ))) - } - Err(other) => { - warn!( - ctx.log, - "Invalid block from local validator"; - "outcome" => format!("{:?}", other) - ); - - Err(ApiError::ProcessingError(format!( - "The SignedBeaconBlock could not be processed and has not been published: {:?}", - other - ))) - } - } - }) -} - -/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. -pub fn get_new_attestation( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let slot = query.slot()?; - let index = query.committee_index()?; - - ctx.beacon_chain - .produce_unaggregated_attestation(slot, index) - .map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e))) -} - -/// HTTP Handler to retrieve the aggregate attestation for a slot -pub fn get_aggregate_attestation( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let attestation_data = query.attestation_data()?; - - match ctx - .beacon_chain - .get_aggregated_attestation(&attestation_data) - { - Ok(Some(attestation)) => Ok(attestation), - Ok(None) => Err(ApiError::NotFound(format!( - "No matching aggregate attestation for slot {:?} is known in slot {:?}", - attestation_data.slot, - ctx.beacon_chain.slot() - ))), - Err(e) => Err(ApiError::ServerError(format!( - "Unable to obtain attestation: {:?}", - e - ))), - } -} - -/// HTTP 
Handler to publish a list of Attestations, which have been signed by a number of validators. -pub fn publish_attestations( - req: Request>, - ctx: Arc>, -) -> Result<(), ApiError> { - let bytes = req.into_body(); - - serde_json::from_slice(&bytes) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to deserialize JSON into a list of attestations: {:?}", - e - )) - }) - // Process all of the aggregates _without_ exiting early if one fails. - .map( - move |attestations: Vec<(Attestation, SubnetId)>| { - attestations - .into_iter() - .enumerate() - .map(|(i, (attestation, subnet_id))| { - process_unaggregated_attestation( - &ctx.beacon_chain, - ctx.network_chan.clone(), - attestation, - subnet_id, - i, - &ctx.log, - ) - }) - .collect::>>() - }, - ) - // Iterate through all the results and return on the first `Err`. - // - // Note: this will only provide info about the _first_ failure, not all failures. - .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) - .map(|_| ()) -} - -/// Processes an unaggregrated attestation that was included in a list of attestations with the -/// index `i`. -#[allow(clippy::redundant_clone)] // false positives in this function. -fn process_unaggregated_attestation( - beacon_chain: &BeaconChain, - network_chan: NetworkChannel, - attestation: Attestation, - subnet_id: SubnetId, - i: usize, - log: &Logger, -) -> Result<(), ApiError> { - let data = &attestation.data.clone(); - - // Verify that the attestation is valid to included on the gossip network. 
- let verified_attestation = beacon_chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) - .map_err(|e| { - handle_attestation_error( - e, - &format!("unaggregated attestation {} failed gossip verification", i), - data, - log, - ) - })?; - - // Publish the attestation to the network - if let Err(e) = network_chan.send(NetworkMessage::Publish { - messages: vec![PubsubMessage::Attestation(Box::new(( - subnet_id, - attestation, - )))], - }) { - return Err(ApiError::ServerError(format!( - "Unable to send unaggregated attestation {} to network: {:?}", - i, e - ))); - } - - beacon_chain - .apply_attestation_to_fork_choice(&verified_attestation) - .map_err(|e| { - handle_fork_choice_error( - e, - &format!( - "unaggregated attestation {} was unable to be added to fork choice", - i - ), - data, - log, - ) - })?; - - beacon_chain - .add_to_naive_aggregation_pool(verified_attestation) - .map_err(|e| { - handle_attestation_error( - e, - &format!( - "unaggregated attestation {} was unable to be added to aggregation pool", - i - ), - data, - log, - ) - })?; - - Ok(()) -} - -/// HTTP Handler to publish an Attestation, which has been signed by a validator. -pub fn publish_aggregate_and_proofs( - req: Request>, - ctx: Arc>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}", - e - )) - }) - // Process all of the aggregates _without_ exiting early if one fails. - .map( - move |signed_aggregates: Vec>| { - signed_aggregates - .into_iter() - .enumerate() - .map(|(i, signed_aggregate)| { - process_aggregated_attestation( - &ctx.beacon_chain, - ctx.network_chan.clone(), - signed_aggregate, - i, - &ctx.log, - ) - }) - .collect::>>() - }, - ) - // Iterate through all the results and return on the first `Err`. 
- // - // Note: this will only provide info about the _first_ failure, not all failures. - .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) -} - -/// Processes an aggregrated attestation that was included in a list of attestations with the index -/// `i`. -#[allow(clippy::redundant_clone)] // false positives in this function. -fn process_aggregated_attestation( - beacon_chain: &BeaconChain, - network_chan: NetworkChannel, - signed_aggregate: SignedAggregateAndProof, - i: usize, - log: &Logger, -) -> Result<(), ApiError> { - let data = &signed_aggregate.message.aggregate.data.clone(); - - // Verify that the attestation is valid to be included on the gossip network. - // - // Using this gossip check for local validators is not necessarily ideal, there will be some - // attestations that we reject that could possibly be included in a block (e.g., attestations - // that late by more than 1 epoch but less than 2). We can come pick this back up if we notice - // that it's materially affecting validator profits. Until then, I'm hesitant to introduce yet - // _another_ attestation verification path. - let verified_attestation = - match beacon_chain.verify_aggregated_attestation_for_gossip(signed_aggregate.clone()) { - Ok(verified_attestation) => verified_attestation, - Err(AttnError::AttestationAlreadyKnown(attestation_root)) => { - trace!( - log, - "Ignored known attn from local validator"; - "attn_root" => format!("{}", attestation_root) - ); - - // Exit early with success for a known attestation, there's no need to re-process - // an aggregate we already know. - return Ok(()); - } - /* - * It's worth noting that we don't check for `Error::AggregatorAlreadyKnown` since (at - * the time of writing) we check for `AttestationAlreadyKnown` first. - * - * Given this, it's impossible to hit `Error::AggregatorAlreadyKnown` without that - * aggregator having already produced a conflicting aggregation. 
This is not slashable - * but I think it's still the sort of condition we should error on, at least for now. - */ - Err(e) => { - return Err(handle_attestation_error( - e, - &format!("aggregated attestation {} failed gossip verification", i), - data, - log, - )) - } - }; - - // Publish the attestation to the network - if let Err(e) = network_chan.send(NetworkMessage::Publish { - messages: vec![PubsubMessage::AggregateAndProofAttestation(Box::new( - signed_aggregate, - ))], - }) { - return Err(ApiError::ServerError(format!( - "Unable to send aggregated attestation {} to network: {:?}", - i, e - ))); - } - - beacon_chain - .apply_attestation_to_fork_choice(&verified_attestation) - .map_err(|e| { - handle_fork_choice_error( - e, - &format!( - "aggregated attestation {} was unable to be added to fork choice", - i - ), - data, - log, - ) - })?; - - beacon_chain - .add_to_block_inclusion_pool(verified_attestation) - .map_err(|e| { - handle_attestation_error( - e, - &format!( - "aggregated attestation {} was unable to be added to op pool", - i - ), - data, - log, - ) - })?; - - Ok(()) -} - -/// Common handler for `AttnError` during attestation verification. -fn handle_attestation_error( - e: AttnError, - detail: &str, - data: &AttestationData, - log: &Logger, -) -> ApiError { - match e { - AttnError::BeaconChainError(e) => { - error!( - log, - "Internal error verifying local attestation"; - "detail" => detail, - "error" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ServerError(format!( - "Internal error verifying local attestation. Error: {:?}. 
Detail: {}", - e, detail - )) - } - e => { - error!( - log, - "Invalid local attestation"; - "detail" => detail, - "reason" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ProcessingError(format!( - "Invalid local attestation. Error: {:?} Detail: {}", - e, detail - )) - } - } -} - -/// Common handler for `ForkChoiceError` during attestation verification. -fn handle_fork_choice_error( - e: BeaconChainError, - detail: &str, - data: &AttestationData, - log: &Logger, -) -> ApiError { - match e { - BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => { - error!( - log, - "Local attestation invalid for fork choice"; - "detail" => detail, - "reason" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ProcessingError(format!( - "Invalid local attestation. Error: {:?} Detail: {}", - e, detail - )) - } - e => { - error!( - log, - "Internal error applying attn to fork choice"; - "detail" => detail, - "error" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ServerError(format!( - "Internal error verifying local attestation. Error: {:?}. 
Detail: {}", - e, detail - )) - } - } -} diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs deleted file mode 100644 index 160ee667ccf..00000000000 --- a/beacon_node/rest_api/tests/test.rs +++ /dev/null @@ -1,1345 +0,0 @@ -#![cfg(test)] - -#[macro_use] -extern crate assert_matches; - -use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use node_test_rig::{ - environment::{Environment, EnvironmentBuilder}, - testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, -}; -use remote_beacon_node::{ - Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorResponse, -}; -use rest_types::ValidatorDutyBytes; -use std::convert::TryInto; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{ - test_utils::{ - build_double_vote_attester_slashing, build_proposer_slashing, - generate_deterministic_keypair, AttesterSlashingTestTask, ProposerSlashingTestTask, - }, - BeaconBlock, BeaconState, ChainSpec, Domain, Epoch, EthSpec, MinimalEthSpec, PublicKey, - RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot, - SubnetId, Validator, -}; - -type E = MinimalEthSpec; - -fn build_env() -> Environment { - EnvironmentBuilder::minimal() - .null_logger() - .expect("should build env logger") - .single_thread_tokio_runtime() - .expect("should start tokio runtime") - .build() - .expect("environment should build") -} - -fn build_node(env: &mut Environment, config: ClientConfig) -> LocalBeaconNode { - let context = env.core_context(); - env.runtime() - .block_on(LocalBeaconNode::production(context, config)) - .expect("should block until node created") -} - -/// Returns the randao reveal for the given slot (assuming the given `beacon_chain` uses -/// deterministic keypairs). 
-fn get_randao_reveal( - beacon_chain: Arc>, - slot: Slot, - spec: &ChainSpec, -) -> Signature { - let head = beacon_chain.head().expect("should get head"); - let fork = head.beacon_state.fork; - let genesis_validators_root = head.beacon_state.genesis_validators_root; - let proposer_index = beacon_chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = spec.get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); - let message = epoch.signing_root(domain); - keypair.sk.sign(message) -} - -/// Signs the given block (assuming the given `beacon_chain` uses deterministic keypairs). -fn sign_block( - beacon_chain: Arc>, - block: BeaconBlock, - spec: &ChainSpec, -) -> SignedBeaconBlock { - let head = beacon_chain.head().expect("should get head"); - let fork = head.beacon_state.fork; - let genesis_validators_root = head.beacon_state.genesis_validators_root; - let proposer_index = beacon_chain - .block_proposer(block.slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - block.sign(&keypair.sk, &fork, genesis_validators_root, spec) -} - -#[test] -fn validator_produce_attestation() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - let genesis_validators_root = beacon_chain.genesis_validators_root; - let state = beacon_chain.head().expect("should get head").beacon_state; - - // Find a validator that has duties in the current slot of the chain. 
- let mut validator_index = 0; - let duties = loop { - let duties = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - if duties.slot == node.client.beacon_chain().unwrap().slot().unwrap() { - break duties; - } else { - validator_index += 1 - } - }; - - let mut attestation = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_attestation(duties.slot, duties.index), - ) - .expect("should fetch attestation from http api"); - - assert_eq!( - attestation.data.index, duties.index, - "should have same index" - ); - assert_eq!(attestation.data.slot, duties.slot, "should have same slot"); - assert_eq!( - attestation.aggregation_bits.num_set_bits(), - 0, - "should have empty aggregation bits" - ); - - let keypair = generate_deterministic_keypair(validator_index); - - // Fetch the duties again, but via HTTP for authenticity. - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties( - attestation.data.slot.epoch(E::slots_per_epoch()), - &[keypair.pk.clone()], - )) - .expect("should fetch duties from http api"); - let duties = &duties[0]; - let committee_count = duties - .committee_count_at_slot - .expect("should have committee count"); - let subnet_id = SubnetId::compute_subnet::( - attestation.data.slot, - attestation.data.index, - committee_count, - spec, - ) - .unwrap(); - // Try publishing the attestation without a signature or a committee bit set, ensure it is - // raises an error. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish unsigned attestation"); - assert!( - !publish_status.is_valid(), - "the unsigned published attestation should be invalid" - ); - - // Set the aggregation bit. 
- attestation - .aggregation_bits - .set( - duties - .attestation_committee_position - .expect("should have committee position"), - true, - ) - .expect("should set attestation bit"); - - // Try publishing with an aggreagation bit set, but an invalid signature. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish attestation with invalid signature"); - assert!( - !publish_status.is_valid(), - "the unsigned published attestation should not be valid" - ); - - // Un-set the aggregation bit, so signing doesn't error. - attestation - .aggregation_bits - .set( - duties - .attestation_committee_position - .expect("should have committee position"), - false, - ) - .expect("should un-set attestation bit"); - - attestation - .sign( - &keypair.sk, - duties - .attestation_committee_position - .expect("should have committee position"), - &state.fork, - state.genesis_validators_root, - spec, - ) - .expect("should sign attestation"); - - // Try publishing the valid attestation. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish attestation"); - assert!( - publish_status.is_valid(), - "the signed published attestation should be valid" - ); - - // Try obtaining an aggregated attestation with a matching attestation data to the previous - // one. - let aggregated_attestation = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_aggregate_attestation(&attestation.data), - ) - .expect("should fetch aggregated attestation from http api"); - - let signed_aggregate_and_proof = SignedAggregateAndProof::from_aggregate( - validator_index as u64, - aggregated_attestation, - None, - &keypair.sk, - &state.fork, - genesis_validators_root, - spec, - ); - - // Publish the signed aggregate. 
- let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_aggregate_and_proof(vec![signed_aggregate_and_proof]), - ) - .expect("should publish aggregate and proof"); - assert!( - publish_status.is_valid(), - "the signed aggregate and proof should be valid" - ); -} - -#[test] -fn validator_duties() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let mut epoch = Epoch::new(0); - - let validators = beacon_chain - .head() - .expect("should get head") - .beacon_state - .validators - .iter() - .map(|v| (&v.pubkey).try_into().expect("pubkey should be valid")) - .collect::>(); - - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties(epoch, &validators)) - .expect("should fetch duties from http api"); - - // 1. Check at the current epoch. - check_duties( - duties, - epoch, - validators.clone(), - beacon_chain.clone(), - spec, - ); - - epoch += 4; - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties(epoch, &validators)) - .expect("should fetch duties from http api"); - - // 2. Check with a long skip forward. - check_duties(duties, epoch, validators, beacon_chain, spec); - - // TODO: test an epoch in the past. Blocked because the `LocalBeaconNode` cannot produce a - // chain, yet. -} - -fn check_duties( - duties: Vec, - epoch: Epoch, - validators: Vec, - beacon_chain: Arc>, - spec: &ChainSpec, -) { - assert_eq!( - validators.len(), - duties.len(), - "there should be a duty for each validator" - ); - - // Are the duties from the current epoch of the beacon chain, and thus are proposer indices - // known? 
- let proposers_known = epoch == beacon_chain.epoch().unwrap(); - - let mut state = beacon_chain - .state_at_slot( - epoch.start_slot(T::EthSpec::slots_per_epoch()), - StateSkipConfig::WithStateRoots, - ) - .expect("should get state at slot"); - - state.build_all_caches(spec).expect("should build caches"); - - validators - .iter() - .zip(duties.iter()) - .for_each(|(validator, duty)| { - assert_eq!( - *validator, - (&duty.validator_pubkey) - .try_into() - .expect("should be valid pubkey"), - "pubkey should match" - ); - - let validator_index = state - .get_validator_index(&validator.clone().into()) - .expect("should have pubkey cache") - .expect("pubkey should exist"); - - let attestation_duty = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - assert_eq!( - Some(attestation_duty.slot), - duty.attestation_slot, - "attestation slot should match" - ); - - assert_eq!( - Some(attestation_duty.index), - duty.attestation_committee_index, - "attestation index should match" - ); - - if proposers_known { - let block_proposal_slots = duty.block_proposal_slots.as_ref().unwrap(); - - if !block_proposal_slots.is_empty() { - for slot in block_proposal_slots { - let expected_proposer = state - .get_beacon_proposer_index(*slot, spec) - .expect("should know proposer"); - assert_eq!( - expected_proposer, validator_index, - "should get correct proposal slot" - ); - } - } else { - epoch.slot_iter(E::slots_per_epoch()).for_each(|slot| { - let slot_proposer = state - .get_beacon_proposer_index(slot, spec) - .expect("should know proposer"); - assert_ne!( - slot_proposer, validator_index, - "validator should not have proposal slot in this epoch" - ) - }) - } - } else { - assert_eq!(duty.block_proposal_slots, None); - } - }); - - if proposers_known { - // Validator duties should include a proposer for every slot of the epoch. 
- let mut all_proposer_slots: Vec = duties - .iter() - .flat_map(|duty| duty.block_proposal_slots.clone().unwrap()) - .collect(); - all_proposer_slots.sort(); - - let all_slots: Vec = epoch.slot_iter(E::slots_per_epoch()).collect(); - assert_eq!(all_proposer_slots, all_slots); - } -} - -#[test] -fn validator_block_post() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let two_slots_secs = (spec.milliseconds_per_slot / 1_000) * 2; - - let mut config = testing_client_config(); - config.genesis = ClientGenesis::Interop { - validator_count: 8, - genesis_time: SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs() - - two_slots_secs, - }; - - let node = build_node(&mut env, config); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec); - - let block = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_block(slot, randao_reveal, None), - ) - .expect("should fetch block from http api"); - - // Try publishing the block without a signature, ensure it is flagged as invalid. 
- let empty_sig_block = SignedBeaconBlock { - message: block.clone(), - signature: Signature::empty(), - }; - let publish_status = env - .runtime() - .block_on(remote_node.http.validator().publish_block(empty_sig_block)) - .expect("should publish block"); - if cfg!(not(feature = "fake_crypto")) { - assert!( - !publish_status.is_valid(), - "the unsigned published block should not be valid" - ); - } - - let signed_block = sign_block(beacon_chain.clone(), block, spec); - let block_root = signed_block.canonical_root(); - - let publish_status = env - .runtime() - .block_on(remote_node.http.validator().publish_block(signed_block)) - .expect("should publish block"); - - if cfg!(not(feature = "fake_crypto")) { - assert_eq!( - publish_status, - PublishStatus::Valid, - "the signed published block should be valid" - ); - } - - let head = env - .runtime() - .block_on(remote_node.http.beacon().get_head()) - .expect("should get head"); - - assert_eq!( - head.block_root, block_root, - "the published block should become the head block" - ); - - // Note: this heads check is not super useful for this test, however it is include so it get - // _some_ testing. If you remove this call, make sure it's tested somewhere else. 
- let heads = env - .runtime() - .block_on(remote_node.http.beacon().get_heads()) - .expect("should get heads"); - - assert_eq!(heads.len(), 1, "there should be only one head"); - assert_eq!( - heads, - vec![HeadBeaconBlock { - beacon_block_root: head.block_root, - beacon_block_slot: head.slot, - }], - "there should be only one head" - ); -} - -#[test] -fn validator_block_get() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); - - let block = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_block(slot, randao_reveal.clone(), None), - ) - .expect("should fetch block from http api"); - - let (expected_block, _state) = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .produce_block(randao_reveal, slot, None) - .expect("should produce block"); - - assert_eq!( - block, expected_block, - "the block returned from the API should be as expected" - ); -} - -#[test] -fn validator_block_get_with_graffiti() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); - - let block = env - .runtime() - .block_on(remote_node.http.validator().produce_block( - slot, - randao_reveal.clone(), - Some(*b"test-graffiti-test-graffiti-test"), - )) - .expect("should fetch block from http api"); - - let (expected_block, _state) = node - .client - 
.beacon_chain() - .expect("client should have beacon chain") - .produce_block( - randao_reveal, - slot, - Some(*b"test-graffiti-test-graffiti-test"), - ) - .expect("should produce block"); - - assert_eq!( - block, expected_block, - "the block returned from the API should be as expected" - ); -} - -#[test] -fn beacon_state() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let (state_by_slot, root) = env - .runtime() - .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0))) - .expect("should fetch state from http api"); - - let (state_by_root, root_2) = env - .runtime() - .block_on(remote_node.http.beacon().get_state_by_root(root)) - .expect("should fetch state from http api"); - - let mut db_state = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots) - .expect("should find state"); - db_state.drop_all_caches(); - - assert_eq!( - root, root_2, - "the two roots returned from the api should be identical" - ); - assert_eq!( - root, - db_state.canonical_root(), - "root from database should match that from the API" - ); - assert_eq!( - state_by_slot, db_state, - "genesis state by slot from api should match that from the DB" - ); - assert_eq!( - state_by_root, db_state, - "genesis state by root from api should match that from the DB" - ); -} - -#[test] -fn beacon_block() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let (block_by_slot, root) = env - .runtime() - .block_on(remote_node.http.beacon().get_block_by_slot(Slot::new(0))) - .expect("should fetch block from http api"); - - let (block_by_root, root_2) = env - .runtime() - .block_on(remote_node.http.beacon().get_block_by_root(root)) - .expect("should fetch block from 
http api"); - - let db_block = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .block_at_slot(Slot::new(0)) - .expect("should find block") - .expect("block should not be none"); - - assert_eq!( - root, root_2, - "the two roots returned from the api should be identical" - ); - assert_eq!( - root, - db_block.canonical_root(), - "root from database should match that from the API" - ); - assert_eq!( - block_by_slot, db_block, - "genesis block by slot from api should match that from the DB" - ); - assert_eq!( - block_by_root, db_block, - "genesis block by root from api should match that from the DB" - ); -} - -#[test] -fn genesis_time() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let genesis_time = env - .runtime() - .block_on(remote_node.http.beacon().get_genesis_time()) - .expect("should fetch genesis time from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .genesis_time, - genesis_time, - "should match genesis time from head state" - ); -} - -#[test] -fn genesis_validators_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let genesis_validators_root = env - .runtime() - .block_on(remote_node.http.beacon().get_genesis_validators_root()) - .expect("should fetch genesis time from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .genesis_validators_root, - genesis_validators_root, - "should match genesis time from head state" - ); -} - -#[test] -fn fork() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = 
node.remote_node().expect("should produce remote node"); - - let fork = env - .runtime() - .block_on(remote_node.http.beacon().get_fork()) - .expect("should fetch from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .fork, - fork, - "should match head state" - ); -} - -#[test] -fn eth2_config() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let eth2_config = env - .runtime() - .block_on(remote_node.http.spec().get_eth2_config()) - .expect("should fetch eth2 config from http api"); - - // TODO: check the entire eth2_config, not just the spec. - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .spec, - eth2_config.spec, - "should match genesis time from head state" - ); -} - -#[test] -fn get_version() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let version = env - .runtime() - .block_on(remote_node.http.node().get_version()) - .expect("should fetch version from http api"); - - assert_eq!( - lighthouse_version::version_with_platform(), - version, - "result should be as expected" - ); -} - -#[test] -fn get_genesis_state_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let slot = Slot::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_state_root(slot)) - .expect("should fetch from http api"); - - let expected = node - .client - .beacon_chain() - .expect("should have beacon chain") - .rev_iter_state_roots() - .expect("should get iter") - .map(Result::unwrap) - .find(|(_cur_root, cur_slot)| slot == *cur_slot) - 
.map(|(cur_root, _)| cur_root) - .expect("chain should have state root at slot"); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_genesis_block_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let slot = Slot::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_block_root(slot)) - .expect("should fetch from http api"); - - let expected = node - .client - .beacon_chain() - .expect("should have beacon chain") - .rev_iter_block_roots() - .expect("should get iter") - .map(Result::unwrap) - .find(|(_cur_root, cur_slot)| slot == *cur_slot) - .map(|(cur_root, _)| cur_root) - .expect("chain should have state root at slot"); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let validators = state.validators.iter().take(2).collect::>(); - let pubkeys = validators - .iter() - .map(|v| (&v.pubkey).try_into().expect("should decode pubkey bytes")) - .collect(); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_validators(pubkeys, None)) - .expect("should fetch from http api"); - - result - .iter() - .zip(validators.iter()) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_all_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - 
.expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_all_validators(None)) - .expect("should fetch from http api"); - - result - .iter() - .zip(state.validators.iter()) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_active_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_active_validators(None)) - .expect("should fetch from http api"); - - /* - * This test isn't comprehensive because all of the validators in the state are active (i.e., - * there is no one to exclude. - * - * This should be fixed once we can generate more interesting scenarios with the - * `NodeTestRig`. 
- */ - - let validators = state - .validators - .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())); - - result - .iter() - .zip(validators) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_committees() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let epoch = Epoch::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_committees(epoch)) - .expect("should fetch from http api"); - - let expected = chain - .head() - .expect("should get head") - .beacon_state - .get_beacon_committees_at_epoch(RelativeEpoch::Current) - .expect("should get committees") - .iter() - .map(|c| Committee { - slot: c.slot, - index: c.index, - committee: c.committee.to_vec(), - }) - .collect::>(); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_fork_choice() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let fork_choice = env - .runtime() - .block_on(remote_node.http.advanced().get_fork_choice()) - .expect("should not error when getting fork choice"); - - assert_eq!( - fork_choice, - *node - .client - .beacon_chain() - .expect("node should have beacon chain") - .fork_choice - .read() - .proto_array() - .core_proto_array(), - "result should be as expected" - ); -} - -#[test] -fn get_operation_pool() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let result = env - .runtime() - .block_on(remote_node.http.advanced().get_operation_pool()) - .expect("should not error when 
getting fork choice"); - - let expected = PersistedOperationPool::from_operation_pool( - &node - .client - .beacon_chain() - .expect("node should have chain") - .op_pool, - ); - - assert_eq!(result, expected, "result should be as expected"); -} - -fn compare_validator_response( - state: &BeaconState, - response: &ValidatorResponse, - validator: &Validator, -) { - let response_validator = response.validator.clone().expect("should have validator"); - let i = response - .validator_index - .expect("should have validator index"); - let balance = response.balance.expect("should have balance"); - - assert_eq!(response.pubkey, validator.pubkey, "pubkey"); - assert_eq!(response_validator, *validator, "validator"); - assert_eq!(state.balances[i], balance, "balances"); - assert_eq!(state.validators[i], *validator, "validator index"); -} - -#[test] -fn proposer_slashing() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let state = chain - .head() - .expect("should have retrieved state") - .beacon_state; - - let spec = &chain.spec; - - // Check that there are no proposer slashings before insertion - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 0); - - let slot = state.slot; - let proposer_index = chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - let key = &keypair.sk; - let fork = &state.fork; - let proposer_slashing = build_proposer_slashing::( - ProposerSlashingTestTask::Valid, - proposer_index as u64, - &key, - fork, - state.genesis_validators_root, - spec, - ); - - let result = env - .runtime() - .block_on( - remote_node - .http - .beacon() - .proposer_slashing(proposer_slashing.clone()), - ) - 
.expect("should fetch from http api"); - assert!(result, true); - - // Length should be just one as we've inserted only one proposer slashing - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 1); - assert_eq!(proposer_slashing.clone(), proposer_slashings[0]); - - let mut invalid_proposer_slashing = build_proposer_slashing::( - ProposerSlashingTestTask::Valid, - proposer_index as u64, - &key, - fork, - state.genesis_validators_root, - spec, - ); - invalid_proposer_slashing.signed_header_2 = invalid_proposer_slashing.signed_header_1.clone(); - - let result = env.runtime().block_on( - remote_node - .http - .beacon() - .proposer_slashing(invalid_proposer_slashing), - ); - assert!(result.is_err()); - - // Length should still be one as we've inserted nothing since last time. - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 1); - assert_eq!(proposer_slashing, proposer_slashings[0]); -} - -#[test] -fn attester_slashing() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let state = chain - .head() - .expect("should have retrieved state") - .beacon_state; - let slot = state.slot; - let spec = &chain.spec; - - let proposer_index = chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - - let secret_keys = vec![&keypair.sk]; - let validator_indices = vec![proposer_index as u64]; - let fork = &state.fork; - - // Checking there are no attester slashings before insertion - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 0); - - let attester_slashing = 
build_double_vote_attester_slashing( - AttesterSlashingTestTask::Valid, - &validator_indices[..], - &secret_keys[..], - fork, - state.genesis_validators_root, - spec, - ); - - let result = env - .runtime() - .block_on( - remote_node - .http - .beacon() - .attester_slashing(attester_slashing.clone()), - ) - .expect("should fetch from http api"); - assert!(result, true); - - // Length should be just one as we've inserted only one attester slashing - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 1); - assert_eq!(attester_slashing, attester_slashings[0]); - - // Building an invalid attester slashing - let mut invalid_attester_slashing = build_double_vote_attester_slashing( - AttesterSlashingTestTask::Valid, - &validator_indices[..], - &secret_keys[..], - fork, - state.genesis_validators_root, - spec, - ); - invalid_attester_slashing.attestation_2 = invalid_attester_slashing.attestation_1.clone(); - - let result = env.runtime().block_on( - remote_node - .http - .beacon() - .attester_slashing(invalid_attester_slashing), - ); - result.unwrap_err(); - - // Length should still be one as we've failed to insert the attester slashing. 
- let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 1); - assert_eq!(attester_slashing, attester_slashings[0]); -} - -mod validator_attestation { - use super::*; - use http::StatusCode; - use node_test_rig::environment::Environment; - use remote_beacon_node::{Error::DidNotSucceed, HttpClient}; - use types::{Attestation, AttestationDuty, MinimalEthSpec}; - use url::Url; - - fn setup() -> ( - Environment, - LocalBeaconNode, - HttpClient, - Url, - AttestationDuty, - ) { - let mut env = build_env(); - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let client = remote_node.http.clone(); - let socket_addr = node - .client - .http_listen_addr() - .expect("A remote beacon node must have a http server"); - let url = Url::parse(&format!( - "http://{}:{}/validator/attestation", - socket_addr.ip(), - socket_addr.port() - )) - .expect("should be valid endpoint"); - - // Find a validator that has duties in the current slot of the chain. 
- let mut validator_index = 0; - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - let state = beacon_chain.head().expect("should get head").beacon_state; - let duties = loop { - let duties = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - if duties.slot == node.client.beacon_chain().unwrap().slot().unwrap() { - break duties; - } else { - validator_index += 1 - } - }; - - (env, node, client, url, duties) - } - - #[test] - fn requires_query_parameters() { - let (mut env, _node, client, url, _duties) = setup(); - - let attestation = env.runtime().block_on( - // query parameters are missing - client.json_get::>(url.clone(), vec![]), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"slot\"]".to_owned()); - } - ); - } - - #[test] - fn requires_slot() { - let (mut env, _node, client, url, duties) = setup(); - - let attestation = env.runtime().block_on( - // `slot` is missing - client.json_get::>( - url.clone(), - vec![("committee_index".into(), format!("{}", duties.index))], - ), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"slot\"]".to_owned()); - } - ); - } - - #[test] - fn requires_committee_index() { - let (mut env, _node, client, url, duties) = setup(); - - let attestation = env.runtime().block_on( - // `committee_index` is missing. 
- client.json_get::>( - url.clone(), - vec![("slot".into(), format!("{}", duties.slot))], - ), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"committee_index\"]".to_owned()); - } - ); - } -} - -#[cfg(target_os = "linux")] -#[test] -fn get_health() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - env.runtime() - .block_on(remote_node.http.node().get_health()) - .unwrap(); -} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index ff64a600cda..a9df358bf08 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -142,7 +142,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http") .long("http") - .help("Enable RESTful HTTP API server. Disabled by default.") + .help("Enable the RESTful HTTP API server. Disabled by default.") .takes_value(false), ) .arg( @@ -169,6 +169,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("") .takes_value(true), ) + /* Prometheus metrics HTTP server related arguments */ + .arg( + Arg::with_name("metrics") + .long("metrics") + .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") + .takes_value(false), + ) + .arg( + Arg::with_name("metrics-address") + .long("metrics-address") + .value_name("ADDRESS") + .help("Set the listen address for the Prometheus metrics HTTP server.") + .default_value("127.0.0.1") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-port") + .long("metrics-port") + .value_name("PORT") + .help("Set the listen TCP port for the Prometheus metrics HTTP server.") + .default_value("5054") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-allow-origin") + .long("metrics-allow-origin") + .value_name("ORIGIN") + .help("Set the value of the Access-Control-Allow-Origin response HTTP header for the Prometheus metrics HTTP server. \ + Use * to allow any origin (not recommended in production)") + .default_value("") + .takes_value(true), + ) /* Websocket related arguments */ .arg( Arg::with_name("ws") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b33a140011e..53134a67e93 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -84,21 +84,21 @@ pub fn get_config( )?; /* - * Http server + * Http API server */ if cli_args.is_present("http") { - client_config.rest_api.enabled = true; + client_config.http_api.enabled = true; } if let Some(address) = cli_args.value_of("http-address") { - client_config.rest_api.listen_address = address + client_config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IPv4 address.")?; } if let Some(port) = cli_args.value_of("http-port") { - client_config.rest_api.port = port + client_config.http_api.listen_port = port .parse::() .map_err(|_| "http-port is not a valid u16.")?; } @@ -109,7 +109,36 @@ pub fn get_config( hyper::header::HeaderValue::from_str(allow_origin) .map_err(|_| "Invalid allow-origin value")?; - client_config.rest_api.allow_origin = allow_origin.to_string(); + client_config.http_api.allow_origin = Some(allow_origin.to_string()); + } + + /* + * Prometheus metrics HTTP server 
+ */ + + if cli_args.is_present("metrics") { + client_config.http_metrics.enabled = true; + } + + if let Some(address) = cli_args.value_of("metrics-address") { + client_config.http_metrics.listen_addr = address + .parse::() + .map_err(|_| "metrics-address is not a valid IPv4 address.")?; + } + + if let Some(port) = cli_args.value_of("metrics-port") { + client_config.http_metrics.listen_port = port + .parse::() + .map_err(|_| "metrics-port is not a valid u16.")?; + } + + if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + // Pre-validate the config value to give feedback to the user on node startup, instead of + // as late as when the first API response is produced. + hyper::header::HeaderValue::from_str(allow_origin) + .map_err(|_| "Invalid allow-origin value")?; + + client_config.http_metrics.allow_origin = Some(allow_origin.to_string()); } /* @@ -202,7 +231,8 @@ pub fn get_config( unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; client_config.network.discovery_port = unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?; - client_config.rest_api.port = 0; + client_config.http_api.listen_port = 0; + client_config.http_metrics.listen_port = 0; client_config.websocket_server.port = 0; } @@ -213,6 +243,11 @@ pub fn get_config( client_config.eth1.deposit_contract_address = format!("{:?}", eth2_testnet_config.deposit_contract_address()?); + let spec_contract_address = format!("{:?}", spec.deposit_contract_address); + if client_config.eth1.deposit_contract_address != spec_contract_address { + return Err("Testnet contract address does not match spec".into()); + } + client_config.eth1.deposit_contract_deploy_block = eth2_testnet_config.deposit_contract_deploy_block; client_config.eth1.lowest_cached_block_number = @@ -248,7 +283,7 @@ pub fn get_config( }; let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN); - client_config.graffiti[..trimmed_graffiti_len] + 
client_config.graffiti.0[..trimmed_graffiti_len] .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]); if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 19931916013..feff1e3206f 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -7,7 +7,7 @@ mod config; pub use beacon_chain; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_data_dir, get_eth2_testnet_config, set_network_config}; +pub use config::{get_config, get_data_dir, get_eth2_testnet_config, set_network_config}; pub use eth2_config::Eth2Config; use beacon_chain::events::TeeEventHandler; @@ -17,7 +17,6 @@ use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, }; use clap::ArgMatches; -use config::get_config; use environment::RuntimeContext; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; @@ -54,7 +53,7 @@ impl ProductionBeaconNode { /// configurations hosted remotely. pub async fn new_from_cli( context: RuntimeContext, - matches: &ArgMatches<'_>, + matches: ArgMatches<'static>, ) -> Result { let client_config = get_config::( &matches, @@ -72,7 +71,6 @@ impl ProductionBeaconNode { context: RuntimeContext, mut client_config: ClientConfig, ) -> Result { - let http_eth2_config = context.eth2_config().clone(); let spec = context.eth2_config().spec.clone(); let client_config_1 = client_config.clone(); let client_genesis = client_config.genesis.clone(); @@ -119,26 +117,22 @@ impl ProductionBeaconNode { builder.no_eth1_backend()? }; - let (builder, events) = builder + let (builder, _events) = builder .system_time_slot_clock()? .tee_event_handler(client_config.websocket_server.clone())?; // Inject the executor into the discv5 network config. client_config.network.discv5_config.executor = Some(Box::new(executor)); - let builder = builder + builder .build_beacon_chain()? 
.network(&client_config.network) .await? - .notifier()?; - - let builder = if client_config.rest_api.enabled { - builder.http_server(&client_config, &http_eth2_config, events)? - } else { - builder - }; - - Ok(Self(builder.build())) + .notifier()? + .http_api_config(client_config.http_api.clone()) + .http_metrics_config(client_config.http_metrics.clone()) + .build() + .map(Self) } pub fn into_inner(self) -> ProductionClient { diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index a845acf04df..7d860538f9b 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -3,6 +3,7 @@ use beacon_chain::StateSkipConfig; use node_test_rig::{ environment::{Environment, EnvironmentBuilder}, + eth2::types::StateId, testing_client_config, LocalBeaconNode, }; use types::{EthSpec, MinimalEthSpec, Slot}; @@ -34,10 +35,12 @@ fn http_server_genesis_state() { let node = build_node(&mut env); let remote_node = node.remote_node().expect("should produce remote node"); - let (api_state, _root) = env + let api_state = env .runtime() - .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0))) - .expect("should fetch state from http api"); + .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0)))) + .expect("should fetch state from http api") + .unwrap() + .data; let mut db_state = node .client diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 18e0ccad246..eb7e02a54a7 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -14,20 +14,15 @@ * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) * [Importing from the Eth2 Launchpad](./validator-import-launchpad.md) -* [Local Testnets](./local-testnets.md) -* [API](./api.md) - * [HTTP (RESTful JSON)](./http.md) - * [/node](./http/node.md) - * [/beacon](./http/beacon.md) - * [/validator](./http/validator.md) - * [/consensus](./http/consensus.md) - * [/network](./http/network.md) - * [/spec](./http/spec.md) - * 
[/advanced](./http/advanced.md) - * [/lighthouse](./http/lighthouse.md) - * [WebSocket](./websockets.md) +* [APIs](./api.md) + * [Beacon Node API](./api-bn.md) + * [/lighthouse](./api-lighthouse.md) + * [Validator Client API](./api-vc.md) * [Advanced Usage](./advanced.md) * [Database Configuration](./advanced_database.md) + * [Prometheus Metrics](./advanced_metrics.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Local Testnets](./local-testnets.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md new file mode 100644 index 00000000000..6c901862ee0 --- /dev/null +++ b/book/src/advanced_metrics.md @@ -0,0 +1,34 @@ +# Prometheus Metrics + +Lighthouse provides an extensive suite of metrics and monitoring in the +[Prometheus](https://prometheus.io/docs/introduction/overview/) export format +via a HTTP server built into Lighthouse. + +These metrics are generally consumed by a Prometheus server and displayed via a +Grafana dashboard. These components are available in a docker-compose format at +[sigp/lighthouse-metrics](https://github.com/sigp/lighthouse-metrics). + +## Beacon Node Metrics + +By default, these metrics are disabled but can be enabled with the `--metrics` +flag. Use the `--metrics-address`, `--metrics-port` and +`--metrics-allow-origin` flags to customize the metrics server. + +### Example + +Start a beacon node with the metrics server enabled: + +```bash +lighthouse bn --metrics +``` + +Check to ensure that the metrics are available on the default port: + +```bash +curl localhost:5054/metrics +``` + +## Validator Client Metrics + +The validator client does not *yet* expose metrics, however this functionality +is expected to be implemented in late-September 2020. 
diff --git a/book/src/api-bn.md b/book/src/api-bn.md new file mode 100644 index 00000000000..d957e43768e --- /dev/null +++ b/book/src/api-bn.md @@ -0,0 +1,130 @@ +# Beacon Node API + +Lighthouse implements the standard [Eth2 Beacon Node API +specification][OpenAPI]. Please follow that link for a full description of each API endpoint. + +> **Warning:** the standard API specification is still in flux and the Lighthouse implementation is partially incomplete. You can track the status of each endpoint at [#1434](https://github.com/sigp/lighthouse/issues/1434). + +## Starting the server + +A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5052`. + +The following CLI flags control the HTTP server: + +- `--http`: enable the HTTP server (required even if the following flags are + provided). +- `--http-port`: specify the listen port of the server. +- `--http-address`: specify the listen address of the server. +- `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` + header. The default is to not supply a header. + +The schema of the API aligns with the standard Eth2 Beacon Node API as defined +at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). +An interactive specification is available [here][OpenAPI]. + +### CLI Example + +Start the beacon node with the HTTP server listening on [http://localhost:5052](http://localhost:5052): + +```bash +lighthouse bn --http +``` + +## HTTP Request/Response Examples + +This section contains some simple examples of using the HTTP API via `curl`. +All endpoints are documented in the [Eth2 Beacon Node API +specification][OpenAPI]. + +### View the head of the beacon chain + +Returns the block header at the head of the canonical chain. 
+ +```bash +curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: +application/json" +``` + +```json +{ + "data": { + "root": "0x4381454174fc28c7095077e959dcab407ae5717b5dca447e74c340c1b743d7b2", + "canonical": true, + "header": { + "message": { + "slot": 3199, + "proposer_index": "19077", + "parent_root": "0xf1934973041c5896d0d608e52847c3cd9a5f809c59c64e76f6020e3d7cd0c7cd", + "state_root": "0xe8e468f9f5961655dde91968f66480868dab8d4147de9498111df2b7e4e6fe60", + "body_root": "0x6f183abc6c4e97f832900b00d4e08d4373bfdc819055d76b0f4ff850f559b883" + }, + "signature": "0x988064a2f9cf13fe3aae051a3d85f6a4bca5a8ff6196f2f504e32f1203b549d5f86a39c6509f7113678880701b1881b50925a0417c1c88a750c8da7cd302dda5aabae4b941e3104d0cf19f5043c4f22a7d75d0d50dad5dbdaf6991381dc159ab" + } + } +} +``` + +### View the status of a validator + +Shows the status of validator at index `1` at the `head` state. + +```bash +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" +``` + +```json +{ + "data": { + "index": "1", + "balance": "63985937939", + "status": "Active", + "validator": { + "pubkey": "0x873e73ee8b3e4fcf1d2fb0f1036ba996ac9910b5b348f6438b5f8ef50857d4da9075d0218a9d1b99a9eae235a39703e1", + "withdrawal_credentials": "0x00b8cdcf79ba7e74300a07e9d8f8121dd0d8dd11dcfd6d3f2807c45b426ac968", + "effective_balance": 32000000000, + "slashed": false, + "activation_eligibility_epoch": 0, + "activation_epoch": 0, + "exit_epoch": 18446744073709552000, + "withdrawable_epoch": 18446744073709552000 + } + } +} +``` + +## Troubleshooting + +### HTTP API is unavailable or refusing connections + +Ensure the `--http` flag has been supplied at the CLI. 
+ +You can quickly check that the HTTP endpoint is up using `curl`: + +```bash +curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" +``` + +The beacon node should respond with its version: + +```json +{"data":{"version":"Lighthouse/v0.2.9-6f7b4768a/x86_64-linux"}} +``` + +If this doesn't work, the server might not be started or there might be a +network connection error. + +### I cannot query my node from a web browser (e.g., Swagger) + +By default, the API does not provide an `Access-Control-Allow-Origin` header, +which causes browsers to reject responses with a CORS error. + +The `--http-allow-origin` flag can be used to add a wild-card CORS header: + +```bash +lighthouse bn --http --http-allow-origin "*" +``` + +> **Warning:** Adding the wild-card allow-origin flag can pose a security risk. +> Only use it in production if you understand the risks of a loose CORS policy. + +[OpenAPI]: https://ethereum.github.io/eth2.0-APIs/#/ diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md new file mode 100644 index 00000000000..3f37673fa9d --- /dev/null +++ b/book/src/api-lighthouse.md @@ -0,0 +1,179 @@ +# Lighthouse Non-Standard APIs + +Lighthouse fully supports the standardization efforts at +[github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs), +however sometimes development requires additional endpoints that shouldn't +necessarily be defined as a broad-reaching standard. Such endpoints are placed +behind the `/lighthouse` path. + +The endpoints behind the `/lighthouse` path are: + +- Not intended to be stable. +- Not guaranteed to be safe. +- For testing and debugging purposes only. + +Although we don't recommend that users rely on these endpoints, we +document them briefly so they can be utilized by developers and +researchers. 
+ +### `/lighthouse/health` + +*Presently only available on Linux.* + +```bash +curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "pid": 1728254, + "pid_num_threads": 47, + "pid_mem_resident_set_size": 510054400, + "pid_mem_virtual_memory_size": 3963158528, + "sys_virt_mem_total": 16715530240, + "sys_virt_mem_available": 4065374208, + "sys_virt_mem_used": 11383402496, + "sys_virt_mem_free": 1368662016, + "sys_virt_mem_percent": 75.67906, + "sys_loadavg_1": 4.92, + "sys_loadavg_5": 5.53, + "sys_loadavg_15": 5.58 + } +} +``` + +### `/lighthouse/syncing` + +```bash +curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "SyncingFinalized": { + "start_slot": 3104, + "head_slot": 343744, + "head_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + } + } +} +``` + +### `/lighthouse/peers` + +```bash +curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq +``` + +```json +[ + { + "peer_id": "16Uiu2HAmA9xa11dtNv2z5fFbgF9hER3yq35qYNTPvN7TdAmvjqqv", + "peer_info": { + "_status": "Healthy", + "score": { + "score": 0 + }, + "client": { + "kind": "Lighthouse", + "version": "v0.2.9-1c9a055c", + "os_version": "aarch64-linux", + "protocol_version": "lighthouse/libp2p", + "agent_string": "Lighthouse/v0.2.9-1c9a055c/aarch64-linux" + }, + "connection_status": { + "status": "disconnected", + "connections_in": 0, + "connections_out": 0, + "last_seen": 1082, + "banned_ips": [] + }, + "listening_addresses": [ + "/ip4/80.109.35.174/tcp/9000", + "/ip4/127.0.0.1/tcp/9000", + "/ip4/192.168.0.73/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip6/::1/tcp/9000" + ], + "sync_status": { + "Advanced": { + "info": { + "status_head_slot": 343829, + "status_head_root": "0xe34e43efc2bb462d9f364bc90e1f7f0094e74310fd172af698b5a94193498871", + "status_finalized_epoch": 10742, + "status_finalized_root": 
"0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + } + } + }, + "meta_data": { + "seq_number": 160, + "attnets": "0x0000000800000080" + } + } + } +] +``` + +### `/lighthouse/peers/connected` + +```bash +curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq +``` + +```json +[ + { + "peer_id": "16Uiu2HAkzJC5TqDSKuLgVUsV4dWat9Hr8EjNZUb6nzFb61mrfqBv", + "peer_info": { + "_status": "Healthy", + "score": { + "score": 0 + }, + "client": { + "kind": "Lighthouse", + "version": "v0.2.8-87181204+", + "os_version": "x86_64-linux", + "protocol_version": "lighthouse/libp2p", + "agent_string": "Lighthouse/v0.2.8-87181204+/x86_64-linux" + }, + "connection_status": { + "status": "connected", + "connections_in": 1, + "connections_out": 0, + "last_seen": 0, + "banned_ips": [] + }, + "listening_addresses": [ + "/ip4/34.204.178.218/tcp/9000", + "/ip4/127.0.0.1/tcp/9000", + "/ip4/172.31.67.58/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip6/::1/tcp/9000" + ], + "sync_status": "Unknown", + "meta_data": { + "seq_number": 1819, + "attnets": "0xffffffffffffffff" + } + } + } +] +``` + +### `/lighthouse/proto_array` + +```bash +curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: application/json" | jq +``` + +*Example omitted for brevity.* + +### `/lighthouse/validator_inclusion/{epoch}/{validator_id}` + +See [Validator Inclusion APIs](./validator-inclusion.md). + +### `/lighthouse/validator_inclusion/{epoch}/global` + +See [Validator Inclusion APIs](./validator-inclusion.md). diff --git a/book/src/api-vc.md b/book/src/api-vc.md new file mode 100644 index 00000000000..e120f69bf5c --- /dev/null +++ b/book/src/api-vc.md @@ -0,0 +1,3 @@ +# Validator Client API + +The validator client API is planned for release in late September 2020. 
diff --git a/book/src/api.md b/book/src/api.md index 0fa6c300129..15bfee37253 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -1,13 +1,9 @@ # APIs -The Lighthouse `beacon_node` provides two APIs for local consumption: +Lighthouse allows users to query the state of Eth2.0 using web-standard, +RESTful HTTP/JSON APIs. -- A [RESTful JSON HTTP API](http.html) which provides beacon chain, node and network - information. -- A read-only [WebSocket API](websockets.html) providing beacon chain events, as they occur. +There are two APIs served by Lighthouse: - -## Security - -These endpoints are not designed to be exposed to the public Internet or -untrusted users. They may pose a considerable DoS attack vector when used improperly. +- [Beacon Node API](./api-bn.md) +- [Validator Client API](./api-vc.md) (not yet released). diff --git a/book/src/http.md b/book/src/http.md index e07440e8da8..700535c2ac2 100644 --- a/book/src/http.md +++ b/book/src/http.md @@ -1,5 +1,9 @@ # HTTP API +[OpenAPI Specification](https://ethereum.github.io/eth2.0-APIs/#/) + +## Beacon Node + A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `localhost:5052`. The following CLI flags control the HTTP server: @@ -9,24 +13,10 @@ The following CLI flags control the HTTP server: - `--http-port`: specify the listen port of the server. - `--http-address`: specify the listen address of the server. -The API is logically divided into several core endpoints, each documented in -detail: - -Endpoint | Description | -| --- | -- | -[`/node`](./http/node.md) | General information about the beacon node. -[`/beacon`](./http/beacon.md) | General information about the beacon chain. -[`/validator`](./http/validator.md) | Provides functionality to validator clients. -[`/consensus`](./http/consensus.md) | Proof-of-stake voting statistics. -[`/network`](./http/network.md) | Information about the p2p network. 
-[`/spec`](./http/spec.md) | Information about the specs that the client is running. -[`/advanced`](./http/advanced.md) | Provides endpoints for advanced inspection of Lighthouse specific objects. -[`/lighthouse`](./http/lighthouse.md) | Provides lighthouse specific endpoints. - -_Please note: The OpenAPI format at -[SwaggerHub: Lighthouse REST -API](https://app.swaggerhub.com/apis-docs/spble/lighthouse_rest_api/0.2.0) has -been **deprecated**. This documentation is now the source of truth for the REST API._ +The schema of the API aligns with the standard Eth2 Beacon Node API as defined +at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). +It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is +available [here](https://ethereum.github.io/eth2.0-APIs/#/). ## Troubleshooting diff --git a/book/src/http/advanced.md b/book/src/http/advanced.md deleted file mode 100644 index 822b6ffffd6..00000000000 --- a/book/src/http/advanced.md +++ /dev/null @@ -1,115 +0,0 @@ -# Lighthouse REST API: `/advanced` - -The `/advanced` endpoints provide information Lighthouse specific data structures for advanced debugging. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/advanced/fork_choice`](#advancedfork_choice) | Get the `proto_array` fork choice object. -[`/advanced/operation_pool`](#advancedoperation_pool) | Get the Lighthouse `PersistedOperationPool` object. - - -## `/advanced/fork_choice` - -Requests the `proto_array` fork choice object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/fork_choice` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "prune_threshold": 256, - "justified_epoch": 25, - "finalized_epoch": 24, - "nodes": [ - { - "slot": 544, - "root": "0x27103c56d4427cb4309dd202920ead6381d54d43277c29cf0572ddf0d528e6ea", - "parent": null, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 1, - "best_descendant": 296 - }, - { - "slot": 545, - "root": "0x09af0e8d4e781ea4280c9c969d168839c564fab3a03942e7db0bfbede7d4c745", - "parent": 0, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 2, - "best_descendant": 296 - }, - ], - "indices": { - "0xb935bb3651eeddcb2d2961bf307156850de982021087062033f02576d5df00a3": 59, - "0x8f4ec47a34c6c1d69ede64d27165d195f7e2a97c711808ce51f1071a6e12d5b9": 189, - "0xf675eba701ef77ee2803a130dda89c3c5673a604d2782c9e25ea2be300d7d2da": 173, - "0x488a483c8d5083faaf5f9535c051b9f373ba60d5a16e77ddb1775f248245b281": 37 - } -} -``` -_Truncated for brevity._ - -## `/advanced/operation_pool` - -Requests the `PersistedOperationPool` object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/operation_pool` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "attestations": [ - [ - { - "v": [39, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 118, 215, 252, 51, 186, 76, 156, 157, 99, 91, 4, 137, 195, 209, 224, 26, 233, 233, 184, 38, 89, 215, 177, 247, 97, 243, 119, 229, 69, 50, 90, 24, 0, 0, 0, 0, 0, 0, 0, 79, 37, 38, 210, 96, 235, 121, 142, 129, 136, 206, 214, 179, 132, 22, 19, 222, 213, 203, 46, 112, 192, 26, 5, 254, 26, 103, 170, 158, 205, 72, 3, 25, 0, 0, 0, 0, 0, 0, 0, 164, 50, 214, 67, 98, 13, 50, 180, 108, 232, 248, 109, 128, 45, 177, 23, 221, 24, 218, 211, 8, 152, 172, 120, 24, 86, 198, 103, 68, 164, 67, 202, 1, 0, 0, 0, 0, 0, 0, 0] - }, - [ - { - "aggregation_bits": "0x03", - "data": { - "slot": 807, - "index": 0, - "beacon_block_root": "0x7076d7fc33ba4c9c9d635b0489c3d1e01ae9e9b82659d7b1f761f377e545325a", - "source": { - "epoch": 24, - "root": "0x4f2526d260eb798e8188ced6b3841613ded5cb2e70c01a05fe1a67aa9ecd4803" - }, - "target": { - "epoch": 25, - "root": "0xa432d643620d32b46ce8f86d802db117dd18dad30898ac781856c66744a443ca" - } - }, - "signature": "0x8b1d624b0cd5a7a0e13944e90826878a230e3901db34ea87dbef5b145ade2fedbc830b6752a38a0937a1594211ab85b615d65f9eef0baccd270acca945786036695f4db969d9ff1693c505c0fe568b2fe9831ea78a74cbf7c945122231f04026" - } - ] - ] - ], - "attester_slashings": [], - "proposer_slashings": [], - "voluntary_exits": [] -} -``` -_Truncated for brevity._ diff --git a/book/src/http/beacon.md b/book/src/http/beacon.md deleted file mode 100644 index 2149f444448..00000000000 --- a/book/src/http/beacon.md +++ /dev/null @@ -1,784 +0,0 @@ -# Lighthouse REST API: `/beacon` - -The `/beacon` endpoints provide information about the canonical head of the -beacon chain and also historical information about beacon blocks and states. 
- -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/beacon/head`](#beaconhead) | Info about the block at the head of the chain. -[`/beacon/heads`](#beaconheads) | Returns a list of all known chain heads. -[`/beacon/block`](#beaconblock) | Get a `BeaconBlock` by slot or root. -[`/beacon/block_root`](#beaconblock_root) | Resolve a slot to a block root. -[`/beacon/fork`](#beaconfork) | Get the fork of the head of the chain. -[`/beacon/genesis_time`](#beacongenesis_time) | Get the genesis time from the beacon state. -[`/beacon/genesis_validators_root`](#beacongenesis_validators_root) | Get the genesis validators root. -[`/beacon/validators`](#beaconvalidators) | Query for one or more validators. -[`/beacon/validators/all`](#beaconvalidatorsall) | Get all validators. -[`/beacon/validators/active`](#beaconvalidatorsactive) | Get all active validators. -[`/beacon/state`](#beaconstate) | Get a `BeaconState` by slot or root. -[`/beacon/state_root`](#beaconstate_root) | Resolve a slot to a state root. -[`/beacon/state/genesis`](#beaconstategenesis) | Get a `BeaconState` at genesis. -[`/beacon/committees`](#beaconcommittees) | Get the shuffling for an epoch. -[`/beacon/proposer_slashing`](#beaconproposer_slashing) | Insert a proposer slashing -[`/beacon/attester_slashing`](#beaconattester_slashing) | Insert an attester slashing - -## `/beacon/head` - -Requests information about the head of the beacon chain, from the node's -perspective. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/head` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "slot": 37923, - "block_root": "0xe865d4805395a0776b8abe46d714a9e64914ab8dc5ff66624e5a1776bcc1684b", - "state_root": "0xe500e3567ab273c9a6f8a057440deff476ab236f0983da27f201ee9494a879f0", - "finalized_slot": 37856, - "finalized_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86", - "justified_slot": 37888, - "justified_block_root": "0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4", - "previous_justified_slot": 37856, - "previous_justified_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86" -} -``` - -## `/beacon/heads` - -Returns the roots of all known head blocks. Only one of these roots is the -canonical head and that is decided by the fork choice algorithm. See [`/beacon/head`](#beaconhead) for the canonical head. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/heads` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - { - "beacon_block_root": "0x226b2fd7c5f3d31dbb21444b96dfafe715f0017cd16545ecc4ffa87229496a69", - "beacon_block_slot": 38373 - }, - { - "beacon_block_root": "0x41ed5b253c4fc841cba8a6d44acbe101866bc674c3cfa3c4e9f7388f465aa15b", - "beacon_block_slot": 38375 - } -] -``` - -## `/beacon/block` - -Request that the node return a beacon chain block that matches the provided -criteria (a block `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any block returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned block is not required to be in the -canonical chain. - -### Returns - -Returns an object containing a single [`SignedBeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock) and the block root of the inner [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock). - -### Example Response - -```json -{ - "root": "0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196", - "beacon_block": { - "message": { - "slot": 0, - "proposer_index": 14, - "parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f", - "body": { - "randao_reveal": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "eth1_data": { - "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "deposit_count": 0, - "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } - }, - "signature": 
"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } -} -``` - -## `/beacon/block_root` - -Returns the block root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196" -``` - -## `/beacon/committees` - -Request the committees (a.k.a. "shuffling") for all slots and committee indices -in a given `epoch`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/committees` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200/500 - -### Parameters - -The `epoch` (`Epoch`) query parameter is required and defines the epoch for -which the committees will be returned. All slots contained within the response will -be inside this epoch. - -### Returns - -A list of beacon committees. - -### Example Response - -```json -[ - { - "slot": 4768, - "index": 0, - "committee": [ - 1154, - 492, - 9667, - 3089, - 8987, - 1421, - 224, - 11243, - 2127, - 2329, - 188, - 482, - 486 - ] - }, - { - "slot": 4768, - "index": 1, - "committee": [ - 5929, - 8482, - 5528, - 6130, - 14343, - 9777, - 10808, - 12739, - 15234, - 12819, - 5423, - 6320, - 9991 - ] - } -] -``` - -_Truncated for brevity._ - -## `/beacon/fork` - -Request that the node return the `fork` of the current head. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/fork` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#fork) of the current head. - -### Example Response - -```json -{ - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 -} -``` - -## `/beacon/genesis_time` - -Request that the node return the genesis time from the beacon state. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_time` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis time. - -### Example Response - -```json -1581576353 -``` - -## `/beacon/genesis_validators_root` - -Request that the node return the genesis validators root from the beacon state. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_validators_root` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis validators root. - -### Example Response - -```json -0x4fbf23439a7a9b9dd91650e64e8124012dde5e2ea2940c552b86f04eb47f95de -``` - -## `/beacon/validators` - -Request that the node returns information about one or more validator public -keys. This request takes the form of a `POST` request to allow sending a large -number of pubkeys in the request. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - state_root: Bytes32, - pubkeys: [PublicKey] -} -``` - -The `state_root` field indicates which `BeaconState` should be used to collect -the information. The `state_root` is optional and omitting it will result in -the canonical head state being used. - - -### Returns - -Returns an object describing several aspects of the given validator. - -### Example - -### Request Body - -```json -{ - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -### Response Body - -```json -[ - { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "balance": 3228885987, - "validator": { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "withdrawal_credentials": "0x00b7bec22d5bda6b2cca1343d4f640d0e9ccc204a06a73703605c590d4c0d28e", - "effective_balance": 3200000000, - "slashed": false, - "activation_eligibility_epoch": 0, - "activation_epoch": 0, - "exit_epoch": 18446744073709551615, - "withdrawable_epoch": 18446744073709551615 - } - }, - { - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "balance": null, - "validator": null - } -] -``` - -## `/beacon/validators/all` - -Returns all validators. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/all` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. - - -## `/beacon/validators/active` - -Returns all validators that are active in the state defined by `state_root`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/active` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. - - -## `/beacon/state` - -Request that the node return a beacon chain state that matches the provided -criteria (a state `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any state returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned state is not required to be in the -canonical chain. 
- -### Returns - -Returns an object containing a single -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate) -and its tree hash root. - -### Example Response - -```json -{ - "root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b", - "beacon_state": { - "genesis_time": 1575652800, - "genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538", - "slot": 18478 - } -} -``` - -_Truncated for brevity._ - -## `/beacon/state_root` - -Returns the state root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f" -``` - -## `/beacon/state/genesis` - -Request that the node return a beacon chain state at genesis (slot 0). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/genesis` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate). - -### Example Response - -```json -{ - "genesis_time": 1581576353, - "slot": 0, - "fork": { - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 - }, -} -``` - -_Truncated for brevity._ - - -## `/beacon/state/committees` - -Request that the node return a beacon chain state at genesis (slot 0). 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/committees` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - - -### Returns - -Returns an object containing the committees for a given epoch. - -### Example Response - -```json -[ - {"slot":64,"index":0,"committee":[]}, - {"slot":65,"index":0,"committee":[3]}, - {"slot":66,"index":0,"committee":[]}, - {"slot":67,"index":0,"committee":[14]}, - {"slot":68,"index":0,"committee":[]}, - {"slot":69,"index":0,"committee":[9]}, - {"slot":70,"index":0,"committee":[]}, - {"slot":71,"index":0,"committee":[11]}, - {"slot":72,"index":0,"committee":[]}, - {"slot":73,"index":0,"committee":[5]}, - {"slot":74,"index":0,"committee":[]}, - {"slot":75,"index":0,"committee":[15]}, - {"slot":76,"index":0,"committee":[]}, - {"slot":77,"index":0,"committee":[0]} -] -``` - -_Truncated for brevity._ - - -## `/beacon/attester_slashing` - -Accepts an `attester_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `attester_slashing` is invalid. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/attester_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Parameters - -Expects the following object in the POST request body: - -``` -{ - attestation_1: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - }, - attestation_2: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the attester slashing was inserted successfully, or the corresponding error if it failed. - -### Example - -### Request Body - -```json -{ - "attestation_1": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - } - }, - "signature": "0xb47f7397cd944b8d5856a13352166bbe74c85625a45b14b7347fc2c9f6f6f82acee674c65bc9ceb576fcf78387a6731c0b0eb3f8371c70db2da4e7f5dfbc451730c159d67263d3db56b6d0e009e4287a8ba3efcacac30b3ae3447e89dc71b5b9" - }, - "attestation_2": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000200000000000000" - } - }, - 
"signature": "0x93fef587a63acf72aaf8df627718fd43cb268035764071f802ffb4370a2969d226595cc650f4c0bf2291ae0c0a41fcac1700f318603d75d34bcb4b9f4a8368f61eeea0e1f5d969d92d5073ba5fbadec102b45ec87d418d25168d2e3c74b9fcbb" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - - - -## `/beacon/proposer_slashing` - -Accepts a `proposer_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `proposer_slashing` is invalid. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/proposer_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - proposer_index: u64, - header_1: { - slot: Slot, - parent_root: Bytes32, - state_root: Bytes32, - body_root: Bytes32, - signature: Bytes32 - }, - header_2: { - slot: Slot, - parent_root: Bytes32, - state_root: Bytes32, - body_root: Bytes32, - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the proposer slashing was inserted successfully, or the corresponding error if it failed. 
- -### Example - -### Request Body - -```json -{ - "proposer_index": 0, - "header_1": { - "slot": 0, - "parent_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "signature": "0xb8970d1342c6d5779c700ec366efd0ca819937ca330960db3ca5a55eb370a3edd83f4cbb2f74d06e82f934fcbd4bb80609a19c2254cc8b3532a4efff9e80edf312ac735757c059d77126851e377f875593e64ba50d1dffe69a809a409202dd12" - }, - "header_2": { - "slot": 0, - "parent_root": "0x0202020202020202020202020202020202020202020202020202020202020202", - "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "signature": "0xb60e6b348698a34e59b22e0af96f8809f977f00f95d52375383ade8d22e9102270a66c6d52b0434214897e11ca4896871510c01b3fd74d62108a855658d5705fcfc4ced5136264a1c6496f05918576926aa191b1ad311b7e27f5aa2167aba294" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - - - - - diff --git a/book/src/http/lighthouse.md b/book/src/http/lighthouse.md deleted file mode 100644 index d80c0f694a5..00000000000 --- a/book/src/http/lighthouse.md +++ /dev/null @@ -1,182 +0,0 @@ -# Lighthouse REST API: `/lighthouse` - -The `/lighthouse` endpoints provide lighthouse-specific information about the beacon node. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/lighthouse/syncing`](#lighthousesyncing) | Get the node's syncing status -[`/lighthouse/peers`](#lighthousepeers) | Get the peers info known by the beacon node -[`/lighthouse/connected_peers`](#lighthouseconnected_peers) | Get the connected_peers known by the beacon node - -## `/lighthouse/syncing` - -Requests the syncing state of a Lighthouse beacon node. Lighthouse has a -custom sync protocol, this request gets Lighthouse-specific sync information. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/lighthouse/syncing` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -If the node is undergoing a finalization sync: -```json -{ - "SyncingFinalized": { - "start_slot": 10, - "head_slot": 20, - "head_root":"0x74020d0e3c3c02d2ea6279d5760f7d0dd376c4924beaaec4d5c0cefd1c0c4465" - } -} -``` - -If the node is undergoing a head chain sync: -```json -{ - "SyncingHead": { - "start_slot":0, - "head_slot":1195 - } -} -``` - -If the node is synced -```json -{ -"Synced" -} -``` - -## `/lighthouse/peers` - -Get all known peers info from the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/lighthouse/peers` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ -{ - "peer_id" : "16Uiu2HAmTEinipUS3haxqucrn7d7SmCKx5XzAVbAZCiNW54ncynG", - "peer_info" : { - "_status" : "Healthy", - "client" : { - "agent_string" : "github.com/libp2p/go-libp2p", - "kind" : "Prysm", - "os_version" : "unknown", - "protocol_version" : "ipfs/0.1.0", - "version" : "unknown" - }, - "connection_status" : { - "Disconnected" : { - "since" : 3 - } - }, - "listening_addresses" : [ - "/ip4/10.3.58.241/tcp/9001", - "/ip4/35.172.14.146/tcp/9001", - "/ip4/35.172.14.146/tcp/9001" - ], - "meta_data" : { - "attnets" : "0x0000000000000000", - "seq_number" : 0 - }, - "reputation" : 20, - "sync_status" : { - "Synced" : { - "status_head_slot" : 18146 - } - } - } - }, - { - "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", - "peer_info" : { - "_status" : "Healthy", - "client" : { - "agent_string" : null, - "kind" : "Unknown", - "os_version" : "unknown", - "protocol_version" : "unknown", - "version" : "unknown" - }, - "connection_status" : { - "Disconnected" : { - "since" : 5 - } - }, - "listening_addresses" : [], - "meta_data" : { - "attnets" : 
"0x0900000000000000", - "seq_number" : 0 - }, - "reputation" : 20, - "sync_status" : "Unknown" - } - }, -] -``` - -## `/lighthouse/connected_peers` - -Get all known peers info from the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/lighthouse/connected_peers` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - { - "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", - "peer_info" : { - "_status" : "Healthy", - "client" : { - "agent_string" : null, - "kind" : "Unknown", - "os_version" : "unknown", - "protocol_version" : "unknown", - "version" : "unknown" - }, - "connection_status" : { - "Connected" : { - "in" : 5, - "out" : 2 - } - }, - "listening_addresses" : [], - "meta_data" : { - "attnets" : "0x0900000000000000", - "seq_number" : 0 - }, - "reputation" : 20, - "sync_status" : "Unknown" - } - }, - ] -``` diff --git a/book/src/http/network.md b/book/src/http/network.md deleted file mode 100644 index 2ac0c83ba49..00000000000 --- a/book/src/http/network.md +++ /dev/null @@ -1,148 +0,0 @@ -# Lighthouse REST API: `/network` - -The `/network` endpoints provide information about the p2p network that -Lighthouse uses to communicate with other beacon nodes. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/network/enr`](#networkenr) | Get the local node's `ENR` as base64 . -[`/network/peer_count`](#networkpeer_count) | Get the count of connected peers. -[`/network/peer_id`](#networkpeer_id) | Get a node's libp2p `PeerId`. -[`/network/peers`](#networkpeers) | List a node's connected peers (as `PeerIds`). -[`/network/listen_port`](#networklisten_port) | Get a node's libp2p listening port. -[`/network/listen_addresses`](#networklisten_addresses) | Get a list of libp2p multiaddr the node is listening on. - -## `network/enr` - -Requests the beacon node for its listening `ENR` address. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/enr` -Method | GET -JSON Encoding | String (base64) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"-IW4QPYyGkXJSuJ2Eji8b-m4PTNrW4YMdBsNOBrYAdCk8NLMJcddAiQlpcv6G_hdNjiLACOPTkqTBhUjnC0wtIIhyQkEgmlwhKwqAPqDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhA1sBKo0yCfw4Z_jbggwflNfftjwKACu-a-CoFAQHJnrm" -``` - -## `/network/peer_count` - -Requests the count of peers connected to the client. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_count` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -5 -``` -## `/network/peer_id` - -Requests the beacon node's local `PeerId`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_id` -Method | GET -JSON Encoding | String (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"QmVFcULBYZecPdCKgGmpEYDqJLqvMecfhJadVBtB371Avd" -``` - -## `/network/peers` - -Requests one `MultiAddr` for each peer connected to the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peers` -Method | GET -JSON Encoding | [String] (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "QmaPGeXcfKFMU13d8VgbnnpeTxcvoFoD9bUpnRGMUJ1L9w", - "QmZt47cP8V96MgiS35WzHKpPbKVBMqr1eoBNTLhQPqpP3m" -] -``` - - -## `/network/listen_port` - -Requests the TCP port that the client's libp2p service is listening on. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_port` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -9000 -``` - -## `/network/listen_addresses` - -Requests the list of multiaddr that the client's libp2p service is listening on. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_addresses` -Method | GET -JSON Encoding | Array -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "/ip4/127.0.0.1/tcp/9000", - "/ip4/192.168.31.115/tcp/9000", - "/ip4/172.24.0.1/tcp/9000", - "/ip4/172.21.0.1/tcp/9000", - "/ip4/172.17.0.1/tcp/9000", - "/ip4/172.18.0.1/tcp/9000", - "/ip4/172.19.0.1/tcp/9000", - "/ip4/172.42.0.1/tcp/9000", - "/ip6/::1/tcp/9000" -] -``` diff --git a/book/src/http/node.md b/book/src/http/node.md deleted file mode 100644 index ae370cbe981..00000000000 --- a/book/src/http/node.md +++ /dev/null @@ -1,91 +0,0 @@ -# Lighthouse REST API: `/node` - -The `/node` endpoints provide information about the lighthouse beacon node. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/node/version`](#nodeversion) | Get the node's version. -[`/node/syncing`](#nodesyncing) | Get the node's syncing status. -[`/node/health`](#nodehealth) | Get the node's health. - -## `/node/version` - -Requests the beacon node's version. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/version` -Method | GET -JSON Encoding | String -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"Lighthouse-0.2.0-unstable" -``` - -## `/node/syncing` - -Requests the syncing status of the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/syncing` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - is_syncing: true, - sync_status: { - starting_slot: 0, - current_slot: 100, - highest_slot: 200, - } -} -``` - -## `/node/health` - -Requests information about the health of the beacon node. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/health` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "pid": 96160, - "pid_num_threads": 30, - "pid_mem_resident_set_size": 55476224, - "pid_mem_virtual_memory_size": 2081382400, - "sys_virt_mem_total": 16721076224, - "sys_virt_mem_available": 7423197184, - "sys_virt_mem_used": 8450183168, - "sys_virt_mem_free": 3496345600, - "sys_virt_mem_percent": 55.605743, - "sys_loadavg_1": 1.56, - "sys_loadavg_5": 2.61, - "sys_loadavg_15": 2.43 -} -``` diff --git a/book/src/http/spec.md b/book/src/http/spec.md deleted file mode 100644 index 619a1d4e362..00000000000 --- a/book/src/http/spec.md +++ /dev/null @@ -1,154 +0,0 @@ -# Lighthouse REST API: `/spec` - -The `/spec` endpoints provide information about Eth2.0 specifications that the node is running. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/spec`](#spec) | Get the full spec object that a node's running. -[`/spec/slots_per_epoch`](#specslots_per_epoch) | Get the number of slots per epoch. -[`/spec/eth2_config`](#specseth2_config) | Get the full Eth2 config object. - -## `/spec` - -Requests the full spec object that a node's running. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "genesis_delay": 172800, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "shard_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 -} -``` - -## `/spec/eth2_config` - -Requests the full `Eth2Config` object. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/eth2_config` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "spec_constants": "mainnet", - "spec": { - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "genesis_delay": 172800, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "shard_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 - } -} -``` - -## `/spec/slots_per_epoch` - -Requests the `SLOTS_PER_EPOCH` parameter from the specs that the node is running. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/slots_per_epoch` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -32 -``` \ No newline at end of file diff --git a/book/src/http/validator.md b/book/src/http/validator.md deleted file mode 100644 index eff0c609501..00000000000 --- a/book/src/http/validator.md +++ /dev/null @@ -1,545 +0,0 @@ -# Lighthouse REST API: `/validator` - -The `/validator` endpoints provide the minimum functionality required for a validator -client to connect to the beacon node and produce blocks and attestations. - -## Endpoints - -HTTP Path | HTTP Method | Description | -| - | - | ---- | -[`/validator/duties`](#validatorduties) | POST | Provides block and attestation production information for validators. -[`/validator/subscribe`](#validatorsubscribe) | POST | Subscribes a list of validators to the beacon node for a particular duty/slot. -[`/validator/duties/all`](#validatordutiesall) | GET |Provides block and attestation production information for all validators. -[`/validator/duties/active`](#validatordutiesactive) | GET | Provides block and attestation production information for all active validators. -[`/validator/block`](#validatorblock-get) | GET | Retrieves the current beacon block for the validator to publish. -[`/validator/block`](#validatorblock-post) | POST | Publishes a signed block to the network. -[`/validator/attestation`](#validatorattestation) | GET | Retrieves the current best attestation for a validator to publish. -[`/validator/aggregate_attestation`](#validatoraggregate_attestation) | GET | Gets an aggregate attestation for validators to sign and publish. -[`/validator/attestations`](#validatorattestations) | POST | Publishes a list of raw unaggregated attestations to their appropriate subnets. 
-[`/validator/aggregate_and_proofs`](#validatoraggregate_and_proofs) | POST | Publishes a list of Signed aggregate and proofs for validators who are aggregators. - -## `/validator/duties` - -Request information about when a validator must produce blocks and attestations -at some given `epoch`. The information returned always refers to the canonical -chain and the same input parameters may yield different results after a re-org. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -Duties are assigned on a per-epoch basis, all duties returned will contain -slots that are inside the given `epoch`. A set of duties will be returned for -each of the `pubkeys`. - -Validators who are not known to the beacon chain (e.g., have not yet deposited) -will have `null` values for most fields. - - -### Returns - -A set of duties for each given pubkey. 
- -### Example - -#### Request Body - -```json -{ - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "validator_pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "attestation_slot": 38511, - "attestation_committee_index": 3, - "attestation_committee_position": 39, - "block_proposal_slots": [], - "aggregator_modulo": 5, - }, - { - "validator_pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "attestation_slot": null, - "attestation_committee_index": null, - "attestation_committee_position": null, - "block_proposal_slots": [] - "aggregator_modulo": null, - } -] -``` - -## `/validator/duties/all` - -Returns the duties for all validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys. - -Considering that duties for non-active validators will just be `null`, it is -generally more efficient to query using [Active Validator -Duties](#active-validator-duties). - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/all` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. 
- -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/duties/active` - -Returns the duties for all active validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys that -are active in the given epoch. - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/active` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. - -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/subscribe` - -Posts a list of `ValidatorSubscription` to subscribe validators to -particular slots to perform attestation duties. - -This informs the beacon node to search for peers and subscribe to -required attestation subnets to perform the attestation duties required. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/subscribe` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -[ - { - validator_index: 10, - attestation_committee_index: 12, - slot: 3, - is_aggregator: true - } -] -``` - -The `is_aggregator` informs the beacon node if the validator is an aggregator -for this slot/committee. - -### Returns - -A null object on success and an error indicating any failures. - -## `/validator/block` GET - - -Produces and returns an unsigned `BeaconBlock` object. 
- -The block will be produced with the given `slot` and the parent block will be the -highest block in the canonical chain that has a slot less than `slot`. The -block will still be produced if some other block is also known to be at `slot` -(i.e., it may produce a block that would be slashable if signed). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `randao_reveal` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the block is to be produced. -- `randao_reveal` (`Signature`): 96 bytes `Signature` for the randomness. - - -### Returns - -Returns a `BeaconBlock` object. - -#### Response Body - -```json -{ - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } -} -``` - -## `/validator/block` POST - -Accepts a `SignedBeaconBlock` for verification. If it is valid, it will be -imported into the local database and published on the network. Invalid blocks -will not be published to the network. - -A block may be considered invalid because it is fundamentally incorrect, or its -parent has not yet been imported. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded `SignedBeaconBlock` in the POST request body: - -### Returns - -Returns a null object if the block passed all block validation and is published to the network. -Else, returns a processing error description. - -### Example - -### Request Body - -```json -{ - "message": { - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [ - - ], - "attester_slashings": [ - - ], - "attestations": [ - - ], - "deposits": [ - - ], - "voluntary_exits": [ - - ] - } - }, - "signature": "0x965ced900dbabd0a78b81a0abb5d03407be0d38762104316416347f2ea6f82652b5759396f402e85df8ee18ba2c60145037c73b1c335f4272f1751a1cd89862b7b4937c035e350d0108554bd4a8930437ec3311c801a65fe8e5ba022689b5c24" -} -``` - -## `/validator/attestation` - -Produces and returns an unsigned `Attestation` from the current state. - -The attestation will reference the `beacon_block_root` of the highest block in -the canonical chain with a slot equal to or less than the given `slot`. - -An error will be returned if the given slot is more than -`SLOTS_PER_HISTORICAL_VECTOR` slots behind the current head block. 
- -This endpoint is not protected against slashing. Signing the returned -attestation may result in a slashable offence. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/attestation` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `committee_index` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the attestation is to be produced. -- `committee_index` (`CommitteeIndex`): The index of the committee that makes the attestation. - - -### Returns - -Returns a `Attestation` object with a default signature. The `signature` field should be replaced by the valid signature. - -#### Response Body - -```json -{ - "aggregation_bits": "0x01", - "data": { - "slot": 100, - "index": 0, - "beacon_block_root": "0xf22e4ec281136d119eabcd4d9d248aeacd042eb63d8d7642f73ad3e71f1c9283", - "source": { - "epoch": 2, - "root": "0x34c1244535c923f08e7f83170d41a076e4f1ec61013846b3a615a1d109d3c329" - }, - "target": { - "epoch": 3, - "root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1" - } - }, - "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" -} -``` - - - -## `/validator/aggregate_attestation` - -Requests an `AggregateAttestation` from the beacon node that has a -specific `attestation.data`. If no aggregate attestation is known this will -return a null object. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/aggregate_attestation` -Method | GET -JSON Encoding | Object -Query Parameters | `attestation_data` -Typical Responses | 200 - -### Returns - -Returns a null object if the attestation data passed is not known to the beacon -node. 
- -### Example - -### Request Body - -```json -{ - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" -} -``` - - -## `/validator/attestations` - -Accepts a list of `Attestation` for verification. If they are valid, they will be imported -into the local database and published to the network. Invalid attestations will -not be published to the network. - -An attestation may be considered invalid because it is fundamentally incorrect -or because the beacon node has not imported the relevant blocks required to -verify it. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/attestations` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded list of signed `Attestation` objects in the POST request body. In -accordance with the naive aggregation scheme, the attestation _must_ have -exactly one of the `attestation.aggregation_bits` fields set. - -### Returns - -Returns a null object if the attestation passed all validation and is published to the network. -Else, returns a processing error description. 
- -### Example - -### Request Body - -```json -{ - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" -} -``` - -## `/validator/aggregate_and_proofs` - -Accepts a list of `SignedAggregateAndProof` for publication. If they are valid -(the validator is an aggregator and the signatures can be verified) these -are published to the network on the global aggregate gossip topic. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/aggregate_and_proofs` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - -### Request Body - -Expects a JSON encoded list of `SignedAggregateAndProof` objects in the POST request body. - -### Returns - -Returns a null object if the attestation passed all validation and is published to the network. -Else, returns a processing error description. 
- -### Example - -### Request Body - -```json -[ - { - "message": { - "aggregator_index": 12, - "aggregate": { - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" - }, - "selection_proof": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" - } - signature: "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" - } -] -``` -_Note: The data in this request is for demonstrating types and does not -contain real data_ diff --git a/book/src/http/consensus.md b/book/src/validator-inclusion.md similarity index 52% rename from book/src/http/consensus.md rename to book/src/validator-inclusion.md index c71b78ce3e9..ce8e61cafee 100644 --- a/book/src/http/consensus.md +++ b/book/src/validator-inclusion.md @@ -1,16 +1,21 @@ -# Lighthouse REST API: `/consensus` +# Validator Inclusion APIs -The `/consensus` endpoints provide information on results of the proof-of-stake -voting process used for finality/justification under Casper FFG. +The `/lighthouse/validator_inclusion` API endpoints provide information on +results of the proof-of-stake voting process used for finality/justification +under Casper FFG. 
+ +These endpoints are not stable or included in the Eth2 standard API. As such, +they are subject to change or removal without a change in major release +version. ## Endpoints HTTP Path | Description | | --- | -- | -[`/consensus/global_votes`](#consensusglobal_votes) | A global vote count for a given epoch. -[`/consensus/individual_votes`](#consensusindividual_votes) | A per-validator breakdown of votes in a given epoch. +[`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. +[`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. -## `/consensus/global_votes` +## Global Returns a global count of votes for some given `epoch`. The results are included both for the current and previous (`epoch - 1`) epochs since both are required @@ -75,40 +80,27 @@ voting upon the previous epoch included in a block. When this value is greater than or equal to `2/3` it is possible that the beacon chain may justify and/or finalize the epoch. -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/consensus/global_votes` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -Requires the `epoch` (`Epoch`) query parameter to determine which epoch will be -considered the current epoch. - -### Returns - -A report on global validator voting participation. 
+### HTTP Example -### Example +```bash +curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H "accept: application/json" | jq +``` ```json { - "current_epoch_active_gwei": 52377600000000, - "previous_epoch_active_gwei": 52377600000000, - "current_epoch_attesting_gwei": 50740900000000, - "current_epoch_target_attesting_gwei": 49526000000000, - "previous_epoch_attesting_gwei": 52377600000000, - "previous_epoch_target_attesting_gwei": 51063400000000, - "previous_epoch_head_attesting_gwei": 9248600000000 + "data": { + "current_epoch_active_gwei": 642688000000000, + "previous_epoch_active_gwei": 642688000000000, + "current_epoch_attesting_gwei": 366208000000000, + "current_epoch_target_attesting_gwei": 366208000000000, + "previous_epoch_attesting_gwei": 1000000000, + "previous_epoch_target_attesting_gwei": 1000000000, + "previous_epoch_head_attesting_gwei": 1000000000 + } } ``` -## `/consensus/individual_votes` +## Individual Returns a per-validator summary of how that validator performed during the current epoch. @@ -117,73 +109,26 @@ The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of t individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/consensus/individual_votes` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body -Expects the following object in the POST request body: +### HTTP Example +```bash +curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/42" -H "accept: application/json" | jq ``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -### Returns - -A report on the validators voting participation. 
- -### Example - -#### Request Body ```json { - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] + "data": { + "is_slashed": false, + "is_withdrawable_in_current_epoch": false, + "is_active_in_current_epoch": true, + "is_active_in_previous_epoch": true, + "current_epoch_effective_balance_gwei": 32000000000, + "is_current_epoch_attester": false, + "is_current_epoch_target_attester": false, + "is_previous_epoch_attester": false, + "is_previous_epoch_target_attester": false, + "is_previous_epoch_head_attester": false + } } ``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "epoch": 1203, - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "vote": { - "is_slashed": false, - "is_withdrawable_in_current_epoch": false, - "is_active_in_current_epoch": true, - "is_active_in_previous_epoch": true, - "current_epoch_effective_balance_gwei": 3200000000, - "is_current_epoch_attester": true, - "is_current_epoch_target_attester": true, - "is_previous_epoch_attester": true, - "is_previous_epoch_target_attester": true, - "is_previous_epoch_head_attester": false - } - }, - { - "epoch": 1203, - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "vote": null - } -] -``` diff --git a/book/src/websockets.md b/book/src/websockets.md deleted file mode 100644 index 69cf0e18d69..00000000000 --- a/book/src/websockets.md +++ /dev/null @@ -1,111 +0,0 @@ -# Websocket API - -**Note: the WebSocket server _only_ emits events. It does not accept any -requests. 
Use the [HTTP API](./http.md) for requests.** - -By default, a Lighthouse `beacon_node` exposes a websocket server on `localhost:5053`. - -The following CLI flags control the websocket server: - -- `--no-ws`: disable the websocket server. -- `--ws-port`: specify the listen port of the server. -- `--ws-address`: specify the listen address of the server. - -All clients connected to the websocket server will receive the same stream of events, all triggered -by the `BeaconChain`. Each event is a JSON object with the following schema: - -```json -{ - "event": "string", - "data": "object" -} -``` - -## Events - -The following events may be emitted: - -### Beacon Head Changed - -Occurs whenever the canonical head of the beacon chain changes. - -```json -{ - "event": "beacon_head_changed", - "data": { - "reorg": "boolean", - "current_head_beacon_block_root": "string", - "previous_head_beacon_block_root": "string" - } -} -``` - -### Beacon Finalization - -Occurs whenever the finalized checkpoint of the canonical head changes. - -```json -{ - "event": "beacon_finalization", - "data": { - "epoch": "number", - "root": "string" - } -} -``` - -### Beacon Block Imported - -Occurs whenever the beacon node imports a valid block. - -```json -{ - "event": "beacon_block_imported", - "data": { - "block": "object" - } -} -``` - -### Beacon Block Rejected - -Occurs whenever the beacon node rejects a block because it is invalid or an -error occurred during validation. - -```json -{ - "event": "beacon_block_rejected", - "data": { - "reason": "string", - "block": "object" - } -} -``` - -### Beacon Attestation Imported - -Occurs whenever the beacon node imports a valid attestation. - -```json -{ - "event": "beacon_attestation_imported", - "data": { - "attestation": "object" - } -} -``` - -### Beacon Attestation Rejected - -Occurs whenever the beacon node rejects an attestation because it is invalid or -an error occurred during validation. 
- -```json -{ - "event": "beacon_attestation_rejected", - "data": { - "reason": "string", - "attestation": "object" - } -} -``` diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml new file mode 100644 index 00000000000..dd5231dde06 --- /dev/null +++ b/common/eth2/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "eth2" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +serde = { version = "1.0.110", features = ["derive"] } +serde_json = "1.0.52" +types = { path = "../../consensus/types" } +hex = "0.4.2" +reqwest = { version = "0.10.8", features = ["json"] } +eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" } +proto_array = { path = "../../consensus/proto_array", optional = true } + +[target.'cfg(target_os = "linux")'.dependencies] +psutil = { version = "3.1.0", optional = true } +procinfo = { version = "0.4.2", optional = true } + +[features] +default = ["lighthouse"] +lighthouse = ["proto_array", "psutil", "procinfo"] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs new file mode 100644 index 00000000000..84429e3b88d --- /dev/null +++ b/common/eth2/src/lib.rs @@ -0,0 +1,786 @@ +//! This crate provides two major things: +//! +//! 1. The types served by the `http_api` crate. +//! 2. A wrapper around `reqwest` that forms a HTTP client, able of consuming the endpoints served +//! by the `http_api` crate. +//! +//! Eventually it would be ideal to publish this crate on crates.io, however we have some local +//! dependencies preventing this presently. + +#[cfg(feature = "lighthouse")] +pub mod lighthouse; +pub mod types; + +use self::types::*; +use reqwest::{IntoUrl, Response}; +use serde::{de::DeserializeOwned, Serialize}; +use std::convert::TryFrom; +use std::fmt; + +pub use reqwest; +pub use reqwest::{StatusCode, Url}; + +#[derive(Debug)] +pub enum Error { + /// The `reqwest` client raised an error. 
+ Reqwest(reqwest::Error), + /// The server returned an error message where the body was able to be parsed. + ServerMessage(ErrorMessage), + /// The server returned an error message where the body was unable to be parsed. + StatusCode(StatusCode), + /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`. + InvalidUrl(Url), +} + +impl Error { + /// If the error has a HTTP status code, return it. + pub fn status(&self) -> Option { + match self { + Error::Reqwest(error) => error.status(), + Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), + Error::StatusCode(status) => Some(*status), + Error::InvalidUrl(_) => None, + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a +/// Lighthouse Beacon Node HTTP server (`http_api`). +#[derive(Clone)] +pub struct BeaconNodeHttpClient { + client: reqwest::Client, + server: Url, +} + +impl BeaconNodeHttpClient { + /// Returns `Err(())` if the URL is invalid. + pub fn new(server: Url) -> Self { + Self { + client: reqwest::Client::new(), + server, + } + } + + /// Returns `Err(())` if the URL is invalid. + pub fn from_components(server: Url, client: reqwest::Client) -> Self { + Self { client, server } + } + + /// Return the path with the standard `/eth1/v1` prefix applied. + fn eth_path(&self) -> Result { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1"); + + Ok(path) + } + + /// Perform a HTTP GET request. + async fn get(&self, url: U) -> Result { + let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + ok_or_error(response) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + + /// Perform a HTTP GET request, returning `None` on a 404 error. 
+ async fn get_opt(&self, url: U) -> Result, Error> { + let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + match ok_or_error(response).await { + Ok(resp) => resp.json().await.map(Option::Some).map_err(Error::Reqwest), + Err(err) => { + if err.status() == Some(StatusCode::NOT_FOUND) { + Ok(None) + } else { + Err(err) + } + } + } + } + + /// Perform a HTTP POST request. + async fn post(&self, url: U, body: &T) -> Result<(), Error> { + let response = self + .client + .post(url) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + ok_or_error(response).await?; + Ok(()) + } + + /// `GET beacon/genesis` + /// + /// ## Errors + /// + /// May return a `404` if beacon chain genesis has not yet occurred. + pub async fn get_beacon_genesis(&self) -> Result, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("genesis"); + + self.get(path).await + } + + /// `GET beacon/states/{state_id}/root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_root( + &self, + state_id: StateId, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("root"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/fork` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_fork( + &self, + state_id: StateId, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("fork"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/finality_checkpoints` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_beacon_states_finality_checkpoints( + &self, + state_id: StateId, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("finality_checkpoints"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/validators` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_validators( + &self, + state_id: StateId, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/committees?slot,index` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_committees( + &self, + state_id: StateId, + epoch: Epoch, + slot: Option, + index: Option, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("committees") + .push(&epoch.to_string()); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(index) = index { + path.query_pairs_mut() + .append_pair("index", &index.to_string()); + } + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/validators/{validator_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_validator_id( + &self, + state_id: StateId, + validator_id: &ValidatorId, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators") + .push(&validator_id.to_string()); + + self.get_opt(path).await + } + + /// `GET beacon/headers?slot,parent_root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_headers( + &self, + slot: Option, + parent_root: Option, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("headers"); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(root) = parent_root { + path.query_pairs_mut() + .append_pair("parent_root", &format!("{:?}", root)); + } + + self.get_opt(path).await + } + + /// `GET beacon/headers/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_headers_block_id( + &self, + block_id: BlockId, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("headers") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `POST beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blocks( + &self, + block: &SignedBeaconBlock, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks"); + + self.post(path, block).await?; + + Ok(()) + } + + /// `GET beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks( + &self, + block_id: BlockId, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("blocks") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `GET beacon/blocks/{block_id}/root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_root( + &self, + block_id: BlockId, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("root"); + + self.get_opt(path).await + } + + /// `GET beacon/blocks/{block_id}/attestations` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_attestations( + &self, + block_id: BlockId, + ) -> Result>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("attestations"); + + self.get_opt(path).await + } + + /// `POST beacon/pool/attestations` + pub async fn post_beacon_pool_attestations( + &self, + attestation: &Attestation, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.post(path, attestation).await?; + + Ok(()) + } + + /// `GET beacon/pool/attestations` + pub async fn get_beacon_pool_attestations( + &self, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.get(path).await + } + + /// `POST beacon/pool/attester_slashings` + pub async fn post_beacon_pool_attester_slashings( + &self, + slashing: &AttesterSlashing, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.post(path, slashing).await?; + + Ok(()) + } + + /// `GET beacon/pool/attester_slashings` + pub async fn get_beacon_pool_attester_slashings( + &self, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.get(path).await + } + + /// `POST beacon/pool/proposer_slashings` + pub async fn post_beacon_pool_proposer_slashings( + &self, + slashing: &ProposerSlashing, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("proposer_slashings"); + + self.post(path, slashing).await?; + + Ok(()) + } + + /// `GET beacon/pool/proposer_slashings` + pub async fn get_beacon_pool_proposer_slashings( + &self, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("proposer_slashings"); + + self.get(path).await + } + + /// `POST beacon/pool/voluntary_exits` + pub async fn post_beacon_pool_voluntary_exits( + &self, + exit: &SignedVoluntaryExit, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("voluntary_exits"); + + self.post(path, exit).await?; + + Ok(()) + } + + /// `GET beacon/pool/voluntary_exits` + pub async fn get_beacon_pool_voluntary_exits( + &self, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("pool") + .push("voluntary_exits"); + + self.get(path).await + } + + /// `GET config/fork_schedule` + pub async fn get_config_fork_schedule(&self) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("config") + .push("fork_schedule"); + + self.get(path).await + } + + /// `GET config/fork_schedule` + pub async fn get_config_spec(&self) -> Result, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("config") + .push("spec"); + + self.get(path).await + } + + /// `GET config/deposit_contract` + pub async fn get_config_deposit_contract( + &self, + ) -> Result, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("config") + .push("deposit_contract"); + + self.get(path).await + } + + /// `GET node/version` + pub async fn get_node_version(&self) -> Result, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("node") + .push("version"); + + self.get(path).await + } + + /// `GET node/syncing` + pub async fn get_node_syncing(&self) -> Result, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("node") + .push("syncing"); + + self.get(path).await + } + + /// `GET debug/beacon/states/{state_id}` + pub async fn get_debug_beacon_states( + &self, + state_id: StateId, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("debug") + .push("beacon") + .push("states") + .push(&state_id.to_string()); + + self.get_opt(path).await + } + + /// `GET debug/beacon/heads` + pub async fn get_debug_beacon_heads( + &self, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("beacon") + .push("heads"); + + self.get(path).await + } + + /// `GET validator/duties/attester/{epoch}?index` + /// + /// ## Note + /// + /// The `index` query parameter accepts a list of validator indices. + pub async fn get_validator_duties_attester( + &self, + epoch: Epoch, + index: Option<&[u64]>, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("duties") + .push("attester") + .push(&epoch.to_string()); + + if let Some(index) = index { + let string = index + .iter() + .map(|i| i.to_string()) + .collect::>() + .join(","); + path.query_pairs_mut().append_pair("index", &string); + } + + self.get(path).await + } + + /// `GET validator/duties/proposer/{epoch}` + pub async fn get_validator_duties_proposer( + &self, + epoch: Epoch, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("duties") + .push("proposer") + .push(&epoch.to_string()); + + self.get(path).await + } + + /// `GET validator/duties/attester/{epoch}?index` + /// + /// ## Note + /// + /// The `index` query parameter accepts a list of validator indices. + pub async fn get_validator_blocks( + &self, + slot: Slot, + randao_reveal: SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("blocks") + .push(&slot.to_string()); + + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + self.get(path).await + } + + /// `GET validator/attestation_data?slot,committee_index` + pub async fn get_validator_attestation_data( + &self, + slot: Slot, + committee_index: CommitteeIndex, + ) -> Result, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("attestation_data"); + + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()) + .append_pair("committee_index", &committee_index.to_string()); + + self.get(path).await + } + + /// `GET validator/attestation_data?slot,committee_index` + pub async fn get_validator_aggregate_attestation( + &self, + slot: Slot, + attestation_data_root: Hash256, + ) -> Result>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("aggregate_attestation"); + + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()) + .append_pair( + "attestation_data_root", + &format!("{:?}", attestation_data_root), + ); + + self.get_opt(path).await + } + + /// `POST validator/aggregate_and_proofs` + pub async fn post_validator_aggregate_and_proof( + &self, + aggregate: &SignedAggregateAndProof, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("aggregate_and_proofs"); + + self.post(path, aggregate).await?; + + Ok(()) + } + + /// `POST validator/beacon_committee_subscriptions` + pub async fn post_validator_beacon_committee_subscriptions( + &self, + subscriptions: &[BeaconCommitteeSubscription], + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("beacon_committee_subscriptions"); + + self.post(path, &subscriptions).await?; + + Ok(()) + } +} + +/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an +/// appropriate error message. +async fn ok_or_error(response: Response) -> Result { + let status = response.status(); + + if status == StatusCode::OK { + Ok(response) + } else if let Ok(message) = response.json().await { + Err(Error::ServerMessage(message)) + } else { + Err(Error::StatusCode(status)) + } +} diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs new file mode 100644 index 00000000000..8fbde521376 --- /dev/null +++ b/common/eth2/src/lighthouse.rs @@ -0,0 +1,224 @@ +//! This module contains endpoints that are non-standard and only available on Lighthouse servers. + +use crate::{ + types::{Epoch, EthSpec, GenericResponse, ValidatorId}, + BeaconNodeHttpClient, Error, +}; +use proto_array::core::ProtoArray; +use serde::{Deserialize, Serialize}; + +pub use eth2_libp2p::{types::SyncState, PeerInfo}; + +/// Information returned by `peers` and `connected_peers`. +// TODO: this should be deserializable.. +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct Peer { + /// The Peer's ID + pub peer_id: String, + /// The PeerInfo associated with the peer. + pub peer_info: PeerInfo, +} + +/// The results of validators voting during an epoch. +/// +/// Provides information about the current and previous epochs. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GlobalValidatorInclusionData { + /// The total effective balance of all active validators during the _current_ epoch. + pub current_epoch_active_gwei: u64, + /// The total effective balance of all active validators during the _previous_ epoch. + pub previous_epoch_active_gwei: u64, + /// The total effective balance of all validators who attested during the _current_ epoch. + pub current_epoch_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _current_ epoch and + /// agreed with the state about the beacon block at the first slot of the _current_ epoch. + pub current_epoch_target_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch. + pub previous_epoch_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. + pub previous_epoch_target_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the time of attestation. + pub previous_epoch_head_attesting_gwei: u64, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorInclusionData { + /// True if the validator has been slashed, ever. + pub is_slashed: bool, + /// True if the validator can withdraw in the current epoch. + pub is_withdrawable_in_current_epoch: bool, + /// True if the validator was active in the state's _current_ epoch. + pub is_active_in_current_epoch: bool, + /// True if the validator was active in the state's _previous_ epoch. + pub is_active_in_previous_epoch: bool, + /// The validator's effective balance in the _current_ epoch. 
+ pub current_epoch_effective_balance_gwei: u64, + /// True if the validator had an attestation included in the _current_ epoch. + pub is_current_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _current_ + /// epoch matches the block root known to the state. + pub is_current_epoch_target_attester: bool, + /// True if the validator had an attestation included in the _previous_ epoch. + pub is_previous_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _previous_ + /// epoch matches the block root known to the state. + pub is_previous_epoch_target_attester: bool, + /// True if the validator's beacon block root attestation in the _previous_ epoch at the + /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. + pub is_previous_epoch_head_attester: bool, +} + +#[cfg(target_os = "linux")] +use {procinfo::pid, psutil::process::Process}; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +/// Reports on the health of the Lighthouse instance. +pub struct Health { + /// The pid of this process. + pub pid: u32, + /// The number of threads used by this pid. + pub pid_num_threads: i32, + /// The total resident memory used by this pid. + pub pid_mem_resident_set_size: u64, + /// The total virtual memory used by this pid. + pub pid_mem_virtual_memory_size: u64, + /// Total virtual memory on the system + pub sys_virt_mem_total: u64, + /// Total virtual memory available for new processes. + pub sys_virt_mem_available: u64, + /// Total virtual memory used on the system + pub sys_virt_mem_used: u64, + /// Total virtual memory not used on the system + pub sys_virt_mem_free: u64, + /// Percentage of virtual memory used on the system + pub sys_virt_mem_percent: f32, + /// System load average over 1 minute. + pub sys_loadavg_1: f64, + /// System load average over 5 minutes. 
+ pub sys_loadavg_5: f64, + /// System load average over 15 minutes. + pub sys_loadavg_15: f64, +} + +impl Health { + #[cfg(not(target_os = "linux"))] + pub fn observe() -> Result { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + pub fn observe() -> Result { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; + + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + let loadavg = + psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + Ok(Self { + pid: process.pid(), + pid_num_threads: stat.num_threads, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + }) + } +} + +impl BeaconNodeHttpClient { + /// `GET lighthouse/health` + pub async fn get_lighthouse_health(&self) -> Result, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("health"); + + self.get(path).await + } + + /// `GET lighthouse/syncing` + pub async fn get_lighthouse_syncing(&self) -> Result, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("syncing"); + + self.get(path).await + } + + /* + * Note: + * + * The `lighthouse/peers` endpoints do not have functions here. 
We are yet to implement + * `Deserialize` on the `PeerInfo` struct since it contains use of `Instant`. This could be + * fairly simply achieved, if desired. + */ + + /// `GET lighthouse/proto_array` + pub async fn get_lighthouse_proto_array(&self) -> Result, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("proto_array"); + + self.get(path).await + } + + /// `GET lighthouse/validator_inclusion/{epoch}/global` + pub async fn get_lighthouse_validator_inclusion_global( + &self, + epoch: Epoch, + ) -> Result, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validator_inclusion") + .push(&epoch.to_string()) + .push("global"); + + self.get(path).await + } + + /// `GET lighthouse/validator_inclusion/{epoch}/{validator_id}` + pub async fn get_lighthouse_validator_inclusion( + &self, + epoch: Epoch, + validator_id: ValidatorId, + ) -> Result>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validator_inclusion") + .push(&epoch.to_string()) + .push(&validator_id.to_string()); + + self.get(path).await + } +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs new file mode 100644 index 00000000000..e7f4a4085bc --- /dev/null +++ b/common/eth2/src/types.rs @@ -0,0 +1,436 @@ +//! This module exposes a superset of the `types` crate. It adds additional types that are only +//! required for the HTTP API. + +use eth2_libp2p::{Enr, Multiaddr}; +use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; +use std::fmt; +use std::str::FromStr; + +pub use types::*; + +/// An API error serializable to JSON. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ErrorMessage { + pub code: u16, + pub message: String, + #[serde(default)] + pub stacktraces: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GenesisData { + #[serde(with = "serde_utils::quoted_u64")] + pub genesis_time: u64, + pub genesis_validators_root: Hash256, + #[serde(with = "serde_utils::bytes_4_hex")] + pub genesis_fork_version: [u8; 4], +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum BlockId { + Head, + Genesis, + Finalized, + Justified, + Slot(Slot), + Root(Hash256), +} + +impl FromStr for BlockId { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "head" => Ok(BlockId::Head), + "genesis" => Ok(BlockId::Genesis), + "finalized" => Ok(BlockId::Finalized), + "justified" => Ok(BlockId::Justified), + other => { + if other.starts_with("0x") { + Hash256::from_str(&s[2..]) + .map(BlockId::Root) + .map_err(|e| format!("{} cannot be parsed as a root", e)) + } else { + u64::from_str(s) + .map(Slot::new) + .map(BlockId::Slot) + .map_err(|_| format!("{} cannot be parsed as a parameter", s)) + } + } + } + } +} + +impl fmt::Display for BlockId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BlockId::Head => write!(f, "head"), + BlockId::Genesis => write!(f, "genesis"), + BlockId::Finalized => write!(f, "finalized"), + BlockId::Justified => write!(f, "justified"), + BlockId::Slot(slot) => write!(f, "{}", slot), + BlockId::Root(root) => write!(f, "{:?}", root), + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum StateId { + Head, + Genesis, + Finalized, + Justified, + Slot(Slot), + Root(Hash256), +} + +impl FromStr for StateId { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "head" => Ok(StateId::Head), + "genesis" => Ok(StateId::Genesis), + "finalized" => Ok(StateId::Finalized), + "justified" => Ok(StateId::Justified), + other => { + if other.starts_with("0x") { + 
Hash256::from_str(&s[2..]) + .map(StateId::Root) + .map_err(|e| format!("{} cannot be parsed as a root", e)) + } else { + u64::from_str(s) + .map(Slot::new) + .map(StateId::Slot) + .map_err(|_| format!("{} cannot be parsed as a slot", s)) + } + } + } + } +} + +impl fmt::Display for StateId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + StateId::Head => write!(f, "head"), + StateId::Genesis => write!(f, "genesis"), + StateId::Finalized => write!(f, "finalized"), + StateId::Justified => write!(f, "justified"), + StateId::Slot(slot) => write!(f, "{}", slot), + StateId::Root(root) => write!(f, "{:?}", root), + } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] +pub struct GenericResponse { + pub data: T, +} + +impl From for GenericResponse { + fn from(data: T) -> Self { + Self { data } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize)] +#[serde(bound = "T: Serialize")] +pub struct GenericResponseRef<'a, T: Serialize> { + pub data: &'a T, +} + +impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { + fn from(data: &'a T) -> Self { + Self { data } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +pub struct RootData { + pub root: Hash256, +} + +impl From for RootData { + fn from(root: Hash256) -> Self { + Self { root } + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct FinalityCheckpointsData { + pub previous_justified: Checkpoint, + pub current_justified: Checkpoint, + pub finalized: Checkpoint, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum ValidatorId { + PublicKey(PublicKeyBytes), + Index(u64), +} + +impl FromStr for ValidatorId { + type Err = String; + + fn from_str(s: &str) -> Result { + if s.starts_with("0x") { + PublicKeyBytes::from_str(s) + .map(ValidatorId::PublicKey) + .map_err(|e| format!("{} cannot be parsed as a public key: {}", s, e)) + } else { + u64::from_str(s) + 
.map(ValidatorId::Index) + .map_err(|e| format!("{} cannot be parsed as a slot: {}", s, e)) + } + } +} + +impl fmt::Display for ValidatorId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ValidatorId::PublicKey(pubkey) => write!(f, "{:?}", pubkey), + ValidatorId::Index(index) => write!(f, "{}", index), + } + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub balance: u64, + pub status: ValidatorStatus, + pub validator: Validator, +} + +// TODO: This does not currently match the spec, but I'm going to try and change the spec using +// this proposal: +// +// https://hackmd.io/bQxMDRt1RbS1TLno8K4NPg?view +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +pub enum ValidatorStatus { + Unknown, + WaitingForEligibility, + WaitingForFinality, + WaitingInQueue, + StandbyForActive(Epoch), + Active, + ActiveAwaitingVoluntaryExit(Epoch), + ActiveAwaitingSlashedExit(Epoch), + ExitedVoluntarily(Epoch), + ExitedSlashed(Epoch), + Withdrawable, + Withdrawn, +} + +impl ValidatorStatus { + pub fn from_validator( + validator_opt: Option<&Validator>, + epoch: Epoch, + finalized_epoch: Epoch, + far_future_epoch: Epoch, + ) -> Self { + if let Some(validator) = validator_opt { + if validator.is_withdrawable_at(epoch) { + ValidatorStatus::Withdrawable + } else if validator.is_exited_at(epoch) { + if validator.slashed { + ValidatorStatus::ExitedSlashed(validator.withdrawable_epoch) + } else { + ValidatorStatus::ExitedVoluntarily(validator.withdrawable_epoch) + } + } else if validator.is_active_at(epoch) { + if validator.exit_epoch < far_future_epoch { + if validator.slashed { + ValidatorStatus::ActiveAwaitingSlashedExit(validator.exit_epoch) + } else { + ValidatorStatus::ActiveAwaitingVoluntaryExit(validator.exit_epoch) + } + } else { + ValidatorStatus::Active + } + } else if 
validator.activation_epoch < far_future_epoch { + ValidatorStatus::StandbyForActive(validator.activation_epoch) + } else if validator.activation_eligibility_epoch < far_future_epoch { + if finalized_epoch < validator.activation_eligibility_epoch { + ValidatorStatus::WaitingForFinality + } else { + ValidatorStatus::WaitingInQueue + } + } else { + ValidatorStatus::WaitingForEligibility + } + } else { + ValidatorStatus::Unknown + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct CommitteesQuery { + pub slot: Option, + pub index: Option, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CommitteeData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64_vec")] + pub validators: Vec, +} + +#[derive(Serialize, Deserialize)] +pub struct HeadersQuery { + pub slot: Option, + pub parent_root: Option, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockHeaderAndSignature { + pub message: BeaconBlockHeader, + pub signature: SignatureBytes, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockHeaderData { + pub root: Hash256, + pub canonical: bool, + pub header: BlockHeaderAndSignature, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DepositContractData { + #[serde(with = "serde_utils::quoted_u64")] + pub chain_id: u64, + pub address: Address, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ChainHeadData { + pub slot: Slot, + pub root: Hash256, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IdentityData { + pub peer_id: String, + pub enr: Enr, + pub p2p_addresses: Vec, + // TODO: missing the following fields: + // + // - discovery_addresses + // - metadata + // + // Tracked here: https://github.com/sigp/lighthouse/issues/1434 +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VersionData { + pub version: 
String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SyncingData { + pub is_syncing: bool, + pub head_slot: Slot, + pub sync_distance: Slot, +} + +#[derive(Clone, PartialEq, Debug, Deserialize)] +#[serde(try_from = "String", bound = "T: FromStr")] +pub struct QueryVec(pub Vec); + +impl TryFrom for QueryVec { + type Error = String; + + fn try_from(string: String) -> Result { + if string == "" { + return Ok(Self(vec![])); + } + + string + .split(',') + .map(|s| s.parse().map_err(|_| "unable to parse".to_string())) + .collect::, String>>() + .map(Self) + } +} + +#[derive(Clone, Deserialize)] +pub struct ValidatorDutiesQuery { + pub index: Option>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AttesterData { + pub pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub committee_index: CommitteeIndex, + #[serde(with = "serde_utils::quoted_u64")] + pub committee_length: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub validator_committee_index: u64, + pub slot: Slot, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ProposerData { + pub pubkey: PublicKeyBytes, + pub slot: Slot, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorBlocksQuery { + pub randao_reveal: SignatureBytes, + pub graffiti: Option, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorAttestationDataQuery { + pub slot: Slot, + pub committee_index: CommitteeIndex, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorAggregateAttestationQuery { + pub attestation_data_root: Hash256, + pub slot: Slot, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BeaconCommitteeSubscription { + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + #[serde(with = 
"serde_utils::quoted_u64")] + pub committee_index: u64, + /// Note: this field did not exist in the API spec at the time of writing, however I have made + /// a PR to have it included: + /// + /// https://github.com/ethereum/eth2.0-APIs/pull/81 + #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, + pub slot: Slot, + pub is_aggregator: bool, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn query_vec() { + assert_eq!( + QueryVec::try_from("0,1,2".to_string()).unwrap(), + QueryVec(vec![0_u64, 1, 2]) + ); + } +} diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 0a4251e06df..0637b973c74 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -55,6 +55,7 @@ //! ``` use prometheus::{HistogramOpts, HistogramTimer, Opts}; +use std::time::Duration; pub use prometheus::{ Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, @@ -221,6 +222,19 @@ pub fn start_timer(histogram: &Result) -> Option { } } +/// Starts a timer on `vec` with the given `name`. +pub fn observe_timer_vec(vec: &Result, name: &[&str], duration: Duration) { + // This conversion was taken from here: + // + // https://docs.rs/prometheus/0.5.0/src/prometheus/histogram.rs.html#550-555 + let nanos = f64::from(duration.subsec_nanos()) / 1e9; + let secs = duration.as_secs() as f64 + nanos; + + if let Some(h) = get_histogram(vec, name) { + h.observe(secs) + } +} + /// Stops a timer created with `start_timer(..)`. 
pub fn stop_timer(timer: Option) { if let Some(t) = timer { diff --git a/common/remote_beacon_node/Cargo.toml b/common/remote_beacon_node/Cargo.toml deleted file mode 100644 index 38ee8c7ca58..00000000000 --- a/common/remote_beacon_node/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "remote_beacon_node" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = "2018" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -reqwest = { version = "0.10.4", features = ["json", "native-tls-vendored"] } -url = "2.1.1" -serde = "1.0.110" -futures = "0.3.5" -types = { path = "../../consensus/types" } -rest_types = { path = "../rest_types" } -hex = "0.4.2" -eth2_ssz = "0.1.2" -serde_json = "1.0.52" -eth2_config = { path = "../eth2_config" } -proto_array = { path = "../../consensus/proto_array" } -operation_pool = { path = "../../beacon_node/operation_pool" } diff --git a/common/remote_beacon_node/src/lib.rs b/common/remote_beacon_node/src/lib.rs deleted file mode 100644 index 199efefd9da..00000000000 --- a/common/remote_beacon_node/src/lib.rs +++ /dev/null @@ -1,732 +0,0 @@ -//! Provides a `RemoteBeaconNode` which interacts with a HTTP API on another Lighthouse (or -//! compatible) instance. -//! -//! Presently, this is only used for testing but it _could_ become a user-facing library. 
- -use eth2_config::Eth2Config; -use reqwest::{Client, ClientBuilder, Response, StatusCode}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use ssz::Encode; -use std::marker::PhantomData; -use std::time::Duration; -use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex, - Epoch, EthSpec, Fork, Graffiti, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, - Signature, SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId, -}; -use url::Url; - -pub use operation_pool::PersistedOperationPool; -pub use proto_array::core::ProtoArray; -pub use rest_types::{ - CanonicalHeadResponse, Committee, HeadBeaconBlock, Health, IndividualVotesRequest, - IndividualVotesResponse, SyncingResponse, ValidatorDutiesRequest, ValidatorDutyBytes, - ValidatorRequest, ValidatorResponse, ValidatorSubscription, -}; - -// Setting a long timeout for debug ensures that crypto-heavy operations can still succeed. -#[cfg(debug_assertions)] -pub const REQUEST_TIMEOUT_SECONDS: u64 = 15; - -#[cfg(not(debug_assertions))] -pub const REQUEST_TIMEOUT_SECONDS: u64 = 5; - -#[derive(Clone)] -/// Connects to a remote Lighthouse (or compatible) node via HTTP. -pub struct RemoteBeaconNode { - pub http: HttpClient, -} - -impl RemoteBeaconNode { - /// Uses the default HTTP timeout. - pub fn new(http_endpoint: String) -> Result { - Self::new_with_timeout(http_endpoint, Duration::from_secs(REQUEST_TIMEOUT_SECONDS)) - } - - pub fn new_with_timeout(http_endpoint: String, timeout: Duration) -> Result { - Ok(Self { - http: HttpClient::new(http_endpoint, timeout) - .map_err(|e| format!("Unable to create http client: {:?}", e))?, - }) - } -} - -#[derive(Debug)] -pub enum Error { - /// Unable to parse a URL. Check the server URL. - UrlParseError(url::ParseError), - /// The `reqwest` library returned an error. - ReqwestError(reqwest::Error), - /// There was an error when encoding/decoding an object using serde. 
- SerdeJsonError(serde_json::Error), - /// The server responded to the request, however it did not return a 200-type success code. - DidNotSucceed { status: StatusCode, body: String }, - /// The request input was invalid. - InvalidInput, -} - -#[derive(Clone)] -pub struct HttpClient { - client: Client, - url: Url, - timeout: Duration, - _phantom: PhantomData, -} - -impl HttpClient { - /// Creates a new instance (without connecting to the node). - pub fn new(server_url: String, timeout: Duration) -> Result { - Ok(Self { - client: ClientBuilder::new() - .timeout(timeout) - .build() - .expect("should build from static configuration"), - url: Url::parse(&server_url)?, - timeout: Duration::from_secs(15), - _phantom: PhantomData, - }) - } - - pub fn beacon(&self) -> Beacon { - Beacon(self.clone()) - } - - pub fn validator(&self) -> Validator { - Validator(self.clone()) - } - - pub fn spec(&self) -> Spec { - Spec(self.clone()) - } - - pub fn node(&self) -> Node { - Node(self.clone()) - } - - pub fn advanced(&self) -> Advanced { - Advanced(self.clone()) - } - - pub fn consensus(&self) -> Consensus { - Consensus(self.clone()) - } - - fn url(&self, path: &str) -> Result { - self.url.join(path).map_err(|e| e.into()) - } - - pub async fn json_post(&self, url: Url, body: T) -> Result { - self.client - .post(&url.to_string()) - .json(&body) - .send() - .await - .map_err(Error::from) - } - - pub async fn json_get( - &self, - mut url: Url, - query_pairs: Vec<(String, String)>, - ) -> Result { - query_pairs.into_iter().for_each(|(key, param)| { - url.query_pairs_mut().append_pair(&key, ¶m); - }); - - let response = self - .client - .get(&url.to_string()) - .send() - .await - .map_err(Error::from)?; - - let success = error_for_status(response).await.map_err(Error::from)?; - success.json::().await.map_err(Error::from) - } -} - -/// Returns an `Error` (with a description) if the `response` was not a 200-type success response. 
-/// -/// Distinct from `Response::error_for_status` because it includes the body of the response as -/// text. This ensures the error message from the server is not discarded. -async fn error_for_status(response: Response) -> Result { - let status = response.status(); - - if status.is_success() { - Ok(response) - } else { - let text_result = response.text().await; - match text_result { - Err(e) => Err(Error::ReqwestError(e)), - Ok(body) => Err(Error::DidNotSucceed { status, body }), - } - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum PublishStatus { - /// The object was valid and has been published to the network. - Valid, - /// The object was not valid and may or may not have been published to the network. - Invalid(String), - /// The server responded with an unknown status code. The object may or may not have been - /// published to the network. - Unknown, -} - -impl PublishStatus { - /// Returns `true` if `*self == PublishStatus::Valid`. - pub fn is_valid(&self) -> bool { - *self == PublishStatus::Valid - } -} - -/// Provides the functions on the `/validator` endpoint of the node. -#[derive(Clone)] -pub struct Validator(HttpClient); - -impl Validator { - fn url(&self, path: &str) -> Result { - self.0 - .url("validator/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Produces an unsigned attestation. - pub async fn produce_attestation( - &self, - slot: Slot, - committee_index: CommitteeIndex, - ) -> Result, Error> { - let query_params = vec![ - ("slot".into(), format!("{}", slot)), - ("committee_index".into(), format!("{}", committee_index)), - ]; - - let client = self.0.clone(); - let url = self.url("attestation")?; - client.json_get(url, query_params).await - } - - /// Produces an aggregate attestation. 
- pub async fn produce_aggregate_attestation( - &self, - attestation_data: &AttestationData, - ) -> Result, Error> { - let query_params = vec![( - "attestation_data".into(), - as_ssz_hex_string(attestation_data), - )]; - - let client = self.0.clone(); - let url = self.url("aggregate_attestation")?; - client.json_get(url, query_params).await - } - - /// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_attestations( - &self, - attestation: Vec<(Attestation, SubnetId)>, - ) -> Result { - let client = self.0.clone(); - let url = self.url("attestations")?; - let response = client.json_post::<_>(url, attestation).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_aggregate_and_proof( - &self, - signed_aggregate_and_proofs: Vec>, - ) -> Result { - let client = self.0.clone(); - let url = self.url("aggregate_and_proofs")?; - let response = client - .json_post::<_>(url, signed_aggregate_and_proofs) - .await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Returns the duties required of the given validator pubkeys in the given epoch. 
- pub async fn get_duties( - &self, - epoch: Epoch, - validator_pubkeys: &[PublicKey], - ) -> Result, Error> { - let client = self.0.clone(); - - let bulk_request = ValidatorDutiesRequest { - epoch, - pubkeys: validator_pubkeys - .iter() - .map(|pubkey| pubkey.clone().into()) - .collect(), - }; - - let url = self.url("duties")?; - let response = client.json_post::<_>(url, bulk_request).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Posts a block to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_block(&self, block: SignedBeaconBlock) -> Result { - let client = self.0.clone(); - let url = self.url("block")?; - let response = client.json_post::<_>(url, block).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Requests a new (unsigned) block from the beacon node. - pub async fn produce_block( - &self, - slot: Slot, - randao_reveal: Signature, - graffiti: Option, - ) -> Result, Error> { - let client = self.0.clone(); - let url = self.url("block")?; - - let mut query_pairs = vec![ - ("slot".into(), format!("{}", slot.as_u64())), - ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), - ]; - - if let Some(graffiti_bytes) = graffiti { - query_pairs.push(("graffiti".into(), as_ssz_hex_string(&graffiti_bytes))); - } - - client.json_get::>(url, query_pairs).await - } - - /// Subscribes a list of validators to particular slots for attestation production/publication. 
- pub async fn subscribe( - &self, - subscriptions: Vec, - ) -> Result { - let client = self.0.clone(); - let url = self.url("subscribe")?; - let response = client.json_post::<_>(url, subscriptions).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } -} - -/// Provides the functions on the `/beacon` endpoint of the node. -#[derive(Clone)] -pub struct Beacon(HttpClient); - -impl Beacon { - fn url(&self, path: &str) -> Result { - self.0 - .url("beacon/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Returns the genesis time. - pub async fn get_genesis_time(&self) -> Result { - let client = self.0.clone(); - let url = self.url("genesis_time")?; - client.json_get(url, vec![]).await - } - - /// Returns the genesis validators root. - pub async fn get_genesis_validators_root(&self) -> Result { - let client = self.0.clone(); - let url = self.url("genesis_validators_root")?; - client.json_get(url, vec![]).await - } - - /// Returns the fork at the head of the beacon chain. - pub async fn get_fork(&self) -> Result { - let client = self.0.clone(); - let url = self.url("fork")?; - client.json_get(url, vec![]).await - } - - /// Returns info about the head of the canonical beacon chain. - pub async fn get_head(&self) -> Result { - let client = self.0.clone(); - let url = self.url("head")?; - client.json_get::(url, vec![]).await - } - - /// Returns the set of known beacon chain head blocks. One of these will be the canonical head. - pub async fn get_heads(&self) -> Result, Error> { - let client = self.0.clone(); - let url = self.url("heads")?; - client.json_get(url, vec![]).await - } - - /// Returns the block and block root at the given slot. 
- pub async fn get_block_by_slot( - &self, - slot: Slot, - ) -> Result<(SignedBeaconBlock, Hash256), Error> { - self.get_block("slot".to_string(), format!("{}", slot.as_u64())) - .await - } - - /// Returns the block and block root at the given root. - pub async fn get_block_by_root( - &self, - root: Hash256, - ) -> Result<(SignedBeaconBlock, Hash256), Error> { - self.get_block("root".to_string(), root_as_string(root)) - .await - } - - /// Returns the block and block root at the given slot. - async fn get_block( - &self, - query_key: String, - query_param: String, - ) -> Result<(SignedBeaconBlock, Hash256), Error> { - let client = self.0.clone(); - let url = self.url("block")?; - client - .json_get::>(url, vec![(query_key, query_param)]) - .await - .map(|response| (response.beacon_block, response.root)) - } - - /// Returns the state and state root at the given slot. - pub async fn get_state_by_slot(&self, slot: Slot) -> Result<(BeaconState, Hash256), Error> { - self.get_state("slot".to_string(), format!("{}", slot.as_u64())) - .await - } - - /// Returns the state and state root at the given root. - pub async fn get_state_by_root( - &self, - root: Hash256, - ) -> Result<(BeaconState, Hash256), Error> { - self.get_state("root".to_string(), root_as_string(root)) - .await - } - - /// Returns the root of the state at the given slot. - pub async fn get_state_root(&self, slot: Slot) -> Result { - let client = self.0.clone(); - let url = self.url("state_root")?; - client - .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - .await - } - - /// Returns the root of the block at the given slot. - pub async fn get_block_root(&self, slot: Slot) -> Result { - let client = self.0.clone(); - let url = self.url("block_root")?; - client - .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - .await - } - - /// Returns the state and state root at the given slot. 
- async fn get_state( - &self, - query_key: String, - query_param: String, - ) -> Result<(BeaconState, Hash256), Error> { - let client = self.0.clone(); - let url = self.url("state")?; - client - .json_get::>(url, vec![(query_key, query_param)]) - .await - .map(|response| (response.beacon_state, response.root)) - } - - /// Returns the block and block root at the given slot. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. - pub async fn get_validators( - &self, - validator_pubkeys: Vec, - state_root: Option, - ) -> Result, Error> { - let client = self.0.clone(); - - let bulk_request = ValidatorRequest { - state_root, - pubkeys: validator_pubkeys - .iter() - .map(|pubkey| pubkey.clone().into()) - .collect(), - }; - - let url = self.url("validators")?; - let response = client.json_post::<_>(url, bulk_request).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Returns all validators. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. - pub async fn get_all_validators( - &self, - state_root: Option, - ) -> Result, Error> { - let client = self.0.clone(); - - let query_params = if let Some(state_root) = state_root { - vec![("state_root".into(), root_as_string(state_root))] - } else { - vec![] - }; - - let url = self.url("validators/all")?; - client.json_get(url, query_params).await - } - - /// Returns the active validators. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. 
- pub async fn get_active_validators( - &self, - state_root: Option, - ) -> Result, Error> { - let client = self.0.clone(); - - let query_params = if let Some(state_root) = state_root { - vec![("state_root".into(), root_as_string(state_root))] - } else { - vec![] - }; - - let url = self.url("validators/active")?; - client.json_get(url, query_params).await - } - - /// Returns committees at the given epoch. - pub async fn get_committees(&self, epoch: Epoch) -> Result, Error> { - let client = self.0.clone(); - - let url = self.url("committees")?; - client - .json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))]) - .await - } - - pub async fn proposer_slashing( - &self, - proposer_slashing: ProposerSlashing, - ) -> Result { - let client = self.0.clone(); - - let url = self.url("proposer_slashing")?; - let response = client.json_post::<_>(url, proposer_slashing).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - pub async fn attester_slashing( - &self, - attester_slashing: AttesterSlashing, - ) -> Result { - let client = self.0.clone(); - - let url = self.url("attester_slashing")?; - let response = client.json_post::<_>(url, attester_slashing).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } -} - -/// Provides the functions on the `/spec` endpoint of the node. -#[derive(Clone)] -pub struct Spec(HttpClient); - -impl Spec { - fn url(&self, path: &str) -> Result { - self.0 - .url("spec/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - pub async fn get_eth2_config(&self) -> Result { - let client = self.0.clone(); - let url = self.url("eth2_config")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/node` endpoint of the node. 
-#[derive(Clone)] -pub struct Node(HttpClient); - -impl Node { - fn url(&self, path: &str) -> Result { - self.0 - .url("node/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - pub async fn get_version(&self) -> Result { - let client = self.0.clone(); - let url = self.url("version")?; - client.json_get(url, vec![]).await - } - - pub async fn get_health(&self) -> Result { - let client = self.0.clone(); - let url = self.url("health")?; - client.json_get(url, vec![]).await - } - - pub async fn syncing_status(&self) -> Result { - let client = self.0.clone(); - let url = self.url("syncing")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/advanced` endpoint of the node. -#[derive(Clone)] -pub struct Advanced(HttpClient); - -impl Advanced { - fn url(&self, path: &str) -> Result { - self.0 - .url("advanced/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Gets the core `ProtoArray` struct from the node. - pub async fn get_fork_choice(&self) -> Result { - let client = self.0.clone(); - let url = self.url("fork_choice")?; - client.json_get(url, vec![]).await - } - - /// Gets the core `PersistedOperationPool` struct from the node. - pub async fn get_operation_pool(&self) -> Result, Error> { - let client = self.0.clone(); - let url = self.url("operation_pool")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/consensus` endpoint of the node. -#[derive(Clone)] -pub struct Consensus(HttpClient); - -impl Consensus { - fn url(&self, path: &str) -> Result { - self.0 - .url("consensus/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Gets a `IndividualVote` for each of the given `pubkeys`. 
- pub async fn get_individual_votes( - &self, - epoch: Epoch, - pubkeys: Vec, - ) -> Result { - let client = self.0.clone(); - let req_body = IndividualVotesRequest { epoch, pubkeys }; - - let url = self.url("individual_votes")?; - let response = client.json_post::<_>(url, req_body).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Gets a `VoteCount` for the given `epoch`. - pub async fn get_vote_count(&self, epoch: Epoch) -> Result { - let client = self.0.clone(); - let query_params = vec![("epoch".into(), format!("{}", epoch.as_u64()))]; - let url = self.url("vote_count")?; - client.json_get(url, query_params).await - } -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse { - pub beacon_block: SignedBeaconBlock, - pub root: Hash256, -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse { - pub beacon_state: BeaconState, - pub root: Hash256, -} - -fn root_as_string(root: Hash256) -> String { - format!("0x{:?}", root) -} - -fn as_ssz_hex_string(item: &T) -> String { - format!("0x{}", hex::encode(item.as_ssz_bytes())) -} - -impl From for Error { - fn from(e: reqwest::Error) -> Error { - Error::ReqwestError(e) - } -} - -impl From for Error { - fn from(e: url::ParseError) -> Error { - Error::UrlParseError(e) - } -} - -impl From for Error { - fn from(e: serde_json::Error) -> Error { - Error::SerdeJsonError(e) - } -} diff --git a/common/rest_types/Cargo.toml b/common/rest_types/Cargo.toml deleted file mode 100644 index d9e021fe19d..00000000000 --- a/common/rest_types/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "rest_types" -version = "0.2.0" -authors = ["Sigma Prime "] -edition = "2018" - -[dependencies] -types = { path = "../../consensus/types" } -eth2_ssz_derive = "0.1.0" -eth2_ssz = "0.1.2" -eth2_hashing = "0.1.0" -tree_hash = "0.1.0" -state_processing = { path = "../../consensus/state_processing" } 
-bls = { path = "../../crypto/bls" } -serde = { version = "1.0.110", features = ["derive"] } -rayon = "1.3.0" -hyper = "0.13.5" -tokio = { version = "0.2.21", features = ["sync"] } -environment = { path = "../../lighthouse/environment" } -store = { path = "../../beacon_node/store" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -serde_json = "1.0.52" -serde_yaml = "0.8.11" - -[target.'cfg(target_os = "linux")'.dependencies] -psutil = "3.1.0" -procinfo = "0.4.2" diff --git a/common/rest_types/src/api_error.rs b/common/rest_types/src/api_error.rs deleted file mode 100644 index 1eac8d4a468..00000000000 --- a/common/rest_types/src/api_error.rs +++ /dev/null @@ -1,99 +0,0 @@ -use hyper::{Body, Response, StatusCode}; -use std::error::Error as StdError; - -#[derive(PartialEq, Debug, Clone)] -pub enum ApiError { - MethodNotAllowed(String), - ServerError(String), - NotImplemented(String), - BadRequest(String), - NotFound(String), - UnsupportedType(String), - ImATeapot(String), // Just in case. - ProcessingError(String), // A 202 error, for when a block/attestation cannot be processed, but still transmitted. 
- InvalidHeaderValue(String), -} - -pub type ApiResult = Result, ApiError>; - -impl ApiError { - pub fn status_code(self) -> (StatusCode, String) { - match self { - ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), - ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), - ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc), - ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), - ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), - ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), - ApiError::ProcessingError(desc) => (StatusCode::ACCEPTED, desc), - ApiError::InvalidHeaderValue(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - } - } -} - -impl Into> for ApiError { - fn into(self) -> Response { - let (status_code, desc) = self.status_code(); - Response::builder() - .status(status_code) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(desc)) - .expect("Response should always be created.") - } -} - -impl From for ApiError { - fn from(e: store::Error) -> ApiError { - ApiError::ServerError(format!("Database error: {:?}", e)) - } -} - -impl From for ApiError { - fn from(e: types::BeaconStateError) -> ApiError { - ApiError::ServerError(format!("BeaconState error: {:?}", e)) - } -} - -impl From for ApiError { - fn from(e: beacon_chain::BeaconChainError) -> ApiError { - ApiError::ServerError(format!("BeaconChainError error: {:?}", e)) - } -} - -impl From for ApiError { - fn from(e: state_processing::per_slot_processing::Error) -> ApiError { - ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) - } -} - -impl From for ApiError { - fn from(e: hyper::error::Error) -> ApiError { - ApiError::ServerError(format!("Networking error: {:?}", e)) - } -} - -impl From for ApiError { - fn from(e: std::io::Error) -> ApiError { - ApiError::ServerError(format!("IO error: 
{:?}", e)) - } -} - -impl From for ApiError { - fn from(e: hyper::header::InvalidHeaderValue) -> ApiError { - ApiError::InvalidHeaderValue(format!("Invalid CORS header value: {:?}", e)) - } -} - -impl StdError for ApiError { - fn cause(&self) -> Option<&dyn StdError> { - None - } -} - -impl std::fmt::Display for ApiError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let status = self.clone().status_code(); - write!(f, "{:?}: {:?}", status.0, status.1) - } -} diff --git a/common/rest_types/src/beacon.rs b/common/rest_types/src/beacon.rs deleted file mode 100644 index 0a141ea282a..00000000000 --- a/common/rest_types/src/beacon.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! A collection of REST API types for interaction with the beacon node. - -use bls::PublicKeyBytes; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::beacon_state::EthSpec; -use types::{BeaconState, CommitteeIndex, Hash256, SignedBeaconBlock, Slot, Validator}; - -/// Information about a block that is at the head of a chain. May or may not represent the -/// canonical head. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct HeadBeaconBlock { - pub beacon_block_root: Hash256, - pub beacon_block_slot: Slot, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse { - pub root: Hash256, - pub beacon_block: SignedBeaconBlock, -} - -/// Information about the block and state that are at head of the beacon chain. 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct CanonicalHeadResponse { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub finalized_slot: Slot, - pub finalized_block_root: Hash256, - pub justified_slot: Slot, - pub justified_block_root: Hash256, - pub previous_justified_slot: Slot, - pub previous_justified_block_root: Hash256, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorResponse { - pub pubkey: PublicKeyBytes, - pub validator_index: Option, - pub balance: Option, - pub validator: Option, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorRequest { - /// If set to `None`, uses the canonical head state. - pub state_root: Option, - pub pubkeys: Vec, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct Committee { - pub slot: Slot, - pub index: CommitteeIndex, - pub committee: Vec, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse { - pub root: Hash256, - pub beacon_state: BeaconState, -} diff --git a/common/rest_types/src/consensus.rs b/common/rest_types/src/consensus.rs deleted file mode 100644 index 519b1ae247c..00000000000 --- a/common/rest_types/src/consensus.rs +++ /dev/null @@ -1,66 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::ValidatorStatus; -use types::{Epoch, PublicKeyBytes}; - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesRequest { - pub epoch: Epoch, - pub pubkeys: Vec, -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVote { - /// True if the validator has been slashed, ever. - pub is_slashed: bool, - /// True if the validator can withdraw in the current epoch. 
- pub is_withdrawable_in_current_epoch: bool, - /// True if the validator was active in the state's _current_ epoch. - pub is_active_in_current_epoch: bool, - /// True if the validator was active in the state's _previous_ epoch. - pub is_active_in_previous_epoch: bool, - /// The validator's effective balance in the _current_ epoch. - pub current_epoch_effective_balance_gwei: u64, - /// True if the validator had an attestation included in the _current_ epoch. - pub is_current_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _current_ - /// epoch matches the block root known to the state. - pub is_current_epoch_target_attester: bool, - /// True if the validator had an attestation included in the _previous_ epoch. - pub is_previous_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _previous_ - /// epoch matches the block root known to the state. - pub is_previous_epoch_target_attester: bool, - /// True if the validator's beacon block root attestation in the _previous_ epoch at the - /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. 
- pub is_previous_epoch_head_attester: bool, -} - -impl Into for ValidatorStatus { - fn into(self) -> IndividualVote { - IndividualVote { - is_slashed: self.is_slashed, - is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch, - is_active_in_current_epoch: self.is_active_in_current_epoch, - is_active_in_previous_epoch: self.is_active_in_previous_epoch, - current_epoch_effective_balance_gwei: self.current_epoch_effective_balance, - is_current_epoch_attester: self.is_current_epoch_attester, - is_current_epoch_target_attester: self.is_current_epoch_target_attester, - is_previous_epoch_attester: self.is_previous_epoch_attester, - is_previous_epoch_target_attester: self.is_previous_epoch_target_attester, - is_previous_epoch_head_attester: self.is_previous_epoch_head_attester, - } - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesResponse { - /// The epoch which is considered the "current" epoch. - pub epoch: Epoch, - /// The validators public key. - pub pubkey: PublicKeyBytes, - /// The index of the validator in state.validators. - pub validator_index: Option, - /// Voting statistics for the validator, if they voted in the given epoch. - pub vote: Option, -} diff --git a/common/rest_types/src/handler.rs b/common/rest_types/src/handler.rs deleted file mode 100644 index cbbcd73b19a..00000000000 --- a/common/rest_types/src/handler.rs +++ /dev/null @@ -1,247 +0,0 @@ -use crate::{ApiError, ApiResult}; -use environment::TaskExecutor; -use hyper::header; -use hyper::{Body, Request, Response, StatusCode}; -use serde::Deserialize; -use serde::Serialize; -use ssz::Encode; - -/// Defines the encoding for the API. 
-#[derive(Clone, Serialize, Deserialize, Copy)] -pub enum ApiEncodingFormat { - JSON, - YAML, - SSZ, -} - -impl ApiEncodingFormat { - pub fn get_content_type(&self) -> &str { - match self { - ApiEncodingFormat::JSON => "application/json", - ApiEncodingFormat::YAML => "application/yaml", - ApiEncodingFormat::SSZ => "application/ssz", - } - } -} - -impl From<&str> for ApiEncodingFormat { - fn from(f: &str) -> ApiEncodingFormat { - match f { - "application/yaml" => ApiEncodingFormat::YAML, - "application/ssz" => ApiEncodingFormat::SSZ, - _ => ApiEncodingFormat::JSON, - } - } -} - -/// Provides a HTTP request handler with Lighthouse-specific functionality. -pub struct Handler { - executor: TaskExecutor, - req: Request<()>, - body: Body, - ctx: T, - encoding: ApiEncodingFormat, - allow_body: bool, -} - -impl Handler { - /// Start handling a new request. - pub fn new(req: Request, ctx: T, executor: TaskExecutor) -> Result { - let (req_parts, body) = req.into_parts(); - let req = Request::from_parts(req_parts, ()); - - let accept_header: String = req - .headers() - .get(header::ACCEPT) - .map_or(Ok(""), |h| h.to_str()) - .map_err(|e| { - ApiError::BadRequest(format!( - "The Accept header contains invalid characters: {:?}", - e - )) - }) - .map(String::from)?; - - Ok(Self { - executor, - req, - body, - ctx, - allow_body: false, - encoding: ApiEncodingFormat::from(accept_header.as_str()), - }) - } - - /// The default behaviour is to return an error if any body is supplied in the request. Calling - /// this function disables that error. - pub fn allow_body(mut self) -> Self { - self.allow_body = true; - self - } - - /// Return a simple static value. - /// - /// Does not use the blocking executor. - pub async fn static_value(self, value: V) -> Result, ApiError> { - // Always check and disallow a body for a static value. 
- let _ = Self::get_body(self.body, false).await?; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Calls `func` in-line, on the core executor. - /// - /// This should only be used for very fast tasks. - pub async fn in_core_task(self, func: F) -> Result, ApiError> - where - V: Send + Sync + 'static, - F: Fn(Request>, T) -> Result + Send + Sync + 'static, - { - let body = Self::get_body(self.body, self.allow_body).await?; - let (req_parts, _) = self.req.into_parts(); - let req = Request::from_parts(req_parts, body); - - let value = func(req, self.ctx)?; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Spawns `func` on the blocking executor. - /// - /// This method is suitable for handling long-running or intensive tasks. - pub async fn in_blocking_task(self, func: F) -> Result, ApiError> - where - V: Send + Sync + 'static, - F: Fn(Request>, T) -> Result + Send + Sync + 'static, - { - let ctx = self.ctx; - let body = Self::get_body(self.body, self.allow_body).await?; - let (req_parts, _) = self.req.into_parts(); - let req = Request::from_parts(req_parts, body); - - let value = self - .executor - .clone() - .handle - .spawn_blocking(move || func(req, ctx)) - .await - .map_err(|e| { - ApiError::ServerError(format!( - "Failed to get blocking join handle: {}", - e.to_string() - )) - })??; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Call `func`, then return a response that is suitable for an SSE stream. 
- pub async fn sse_stream(self, func: F) -> ApiResult - where - F: Fn(Request<()>, T) -> Result, - { - let body = func(self.req, self.ctx)?; - - Response::builder() - .status(200) - .header("Content-Type", "text/event-stream") - .header("Connection", "Keep-Alive") - .header("Cache-Control", "no-cache") - .header("Access-Control-Allow-Origin", "*") - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } - - /// Downloads the bytes for `body`. - async fn get_body(body: Body, allow_body: bool) -> Result, ApiError> { - let bytes = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - - if !allow_body && !bytes[..].is_empty() { - Err(ApiError::BadRequest( - "The request body must be empty".to_string(), - )) - } else { - Ok(bytes.into_iter().collect()) - } - } -} - -/// A request that has been "handled" and now a result (`value`) needs to be serialize and -/// returned. -pub struct HandledRequest { - encoding: ApiEncodingFormat, - value: V, -} - -impl HandledRequest { - /// Simple encode a string as utf-8. - pub fn text_encoding(self) -> ApiResult { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(self.value)) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} - -impl HandledRequest { - /// Suitable for all items which implement `serde` and `ssz`. - pub fn all_encodings(self) -> ApiResult { - match self.encoding { - ApiEncodingFormat::SSZ => Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/ssz") - .body(Body::from(self.value.as_ssz_bytes())) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), - _ => self.serde_encodings(), - } - } -} - -impl HandledRequest { - /// Suitable for items which only implement `serde`. 
- pub fn serde_encodings(self) -> ApiResult { - let (body, content_type) = match self.encoding { - ApiEncodingFormat::JSON => ( - Body::from(serde_json::to_string(&self.value).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?), - "application/json", - ), - ApiEncodingFormat::SSZ => { - return Err(ApiError::UnsupportedType( - "Response cannot be encoded as SSZ.".into(), - )); - } - ApiEncodingFormat::YAML => ( - Body::from(serde_yaml::to_string(&self.value).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as YAML: {:?}", - e - )) - })?), - "application/yaml", - ), - }; - - Response::builder() - .status(StatusCode::OK) - .header("content-type", content_type) - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} diff --git a/common/rest_types/src/lib.rs b/common/rest_types/src/lib.rs deleted file mode 100644 index 1bedd1cadbc..00000000000 --- a/common/rest_types/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! A collection of types used to pass data across the rest HTTP API. -//! -//! This is primarily used by the validator client and the beacon node rest API. 
- -mod api_error; -mod beacon; -mod consensus; -mod handler; -mod node; -mod validator; - -pub use api_error::{ApiError, ApiResult}; -pub use beacon::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; -pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; -pub use handler::{ApiEncodingFormat, Handler}; -pub use node::{Health, SyncingResponse, SyncingStatus}; -pub use validator::{ - ValidatorDutiesRequest, ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription, -}; diff --git a/common/rest_types/src/node.rs b/common/rest_types/src/node.rs deleted file mode 100644 index ca98645cc8c..00000000000 --- a/common/rest_types/src/node.rs +++ /dev/null @@ -1,103 +0,0 @@ -//! Collection of types for the /node HTTP -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::Slot; - -#[cfg(target_os = "linux")] -use {procinfo::pid, psutil::process::Process}; - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -/// The current syncing status of the node. -pub struct SyncingStatus { - /// The starting slot of sync. - /// - /// For a finalized sync, this is the start slot of the current finalized syncing - /// chain. - /// - /// For head sync this is the last finalized slot. - pub starting_slot: Slot, - /// The current slot. - pub current_slot: Slot, - /// The highest known slot. For the current syncing chain. - /// - /// For a finalized sync, the target finalized slot. - /// For head sync, this is the highest known slot of all head chains. - pub highest_slot: Slot, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -/// The response for the /node/syncing HTTP GET. -pub struct SyncingResponse { - /// Is the node syncing. - pub is_syncing: bool, - /// The current sync status. 
- pub sync_status: SyncingStatus, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -/// Reports on the health of the Lighthouse instance. -pub struct Health { - /// The pid of this process. - pub pid: u32, - /// The number of threads used by this pid. - pub pid_num_threads: i32, - /// The total resident memory used by this pid. - pub pid_mem_resident_set_size: u64, - /// The total virtual memory used by this pid. - pub pid_mem_virtual_memory_size: u64, - /// Total virtual memory on the system - pub sys_virt_mem_total: u64, - /// Total virtual memory available for new processes. - pub sys_virt_mem_available: u64, - /// Total virtual memory used on the system - pub sys_virt_mem_used: u64, - /// Total virtual memory not used on the system - pub sys_virt_mem_free: u64, - /// Percentage of virtual memory used on the system - pub sys_virt_mem_percent: f32, - /// System load average over 1 minute. - pub sys_loadavg_1: f64, - /// System load average over 5 minutes. - pub sys_loadavg_5: f64, - /// System load average over 15 minutes. 
- pub sys_loadavg_15: f64, -} - -impl Health { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; - - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - let loadavg = - psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - Ok(Self { - pid: process.pid(), - pid_num_threads: stat.num_threads, - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one, - sys_loadavg_5: loadavg.five, - sys_loadavg_15: loadavg.fifteen, - }) - } -} diff --git a/common/rest_types/src/validator.rs b/common/rest_types/src/validator.rs deleted file mode 100644 index 2b0f077298a..00000000000 --- a/common/rest_types/src/validator.rs +++ /dev/null @@ -1,103 +0,0 @@ -use bls::{PublicKey, PublicKeyBytes}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::{CommitteeIndex, Epoch, Slot}; - -/// A Validator duty with the validator public key represented a `PublicKeyBytes`. -pub type ValidatorDutyBytes = ValidatorDutyBase; -/// A validator duty with the pubkey represented as a `PublicKey`. 
-pub type ValidatorDuty = ValidatorDutyBase; - -// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -pub struct ValidatorDutyBase { - /// The validator's BLS public key, uniquely identifying them. - pub validator_pubkey: T, - /// The validator's index in `state.validators` - pub validator_index: Option, - /// The slot at which the validator must attest. - pub attestation_slot: Option, - /// The index of the committee within `slot` of which the validator is a member. - pub attestation_committee_index: Option, - /// The position of the validator in the committee. - pub attestation_committee_position: Option, - /// The committee count at `attestation_slot`. - pub committee_count_at_slot: Option, - /// The slots in which a validator must propose a block (can be empty). - /// - /// Should be set to `None` when duties are not yet known (before the current epoch). - pub block_proposal_slots: Option>, - /// This provides the modulo: `max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)` - /// which allows the validator client to determine if this duty requires the validator to be - /// aggregate attestations. - pub aggregator_modulo: Option, -} - -impl ValidatorDutyBase { - /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`. 
- pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool - where - T: PartialEq, - { - self.validator_pubkey == other.validator_pubkey - && self.validator_index == other.validator_index - && self.attestation_slot == other.attestation_slot - && self.attestation_committee_index == other.attestation_committee_index - && self.attestation_committee_position == other.attestation_committee_position - && self.committee_count_at_slot == other.committee_count_at_slot - && self.aggregator_modulo == other.aggregator_modulo - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorDutiesRequest { - pub epoch: Epoch, - pub pubkeys: Vec, -} - -/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation -/// duties. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorSubscription { - /// The validators index. - pub validator_index: u64, - /// The index of the committee within `slot` of which the validator is a member. Used by the - /// beacon node to quickly evaluate the associated `SubnetId`. - pub attestation_committee_index: CommitteeIndex, - /// The slot in which to subscribe. - pub slot: Slot, - /// Committee count at slot to subscribe. - pub committee_count_at_slot: u64, - /// If true, the validator is an aggregator and the beacon node should aggregate attestations - /// for this slot. 
- pub is_aggregator: bool, -} - -#[cfg(test)] -mod test { - use super::*; - use bls::SecretKey; - - #[test] - fn eq_ignoring_proposal_slots() { - let validator_pubkey = SecretKey::deserialize(&[1; 32]).unwrap().public_key(); - - let duty1 = ValidatorDuty { - validator_pubkey, - validator_index: Some(10), - attestation_slot: Some(Slot::new(50)), - attestation_committee_index: Some(2), - attestation_committee_position: Some(6), - committee_count_at_slot: Some(4), - block_proposal_slots: None, - aggregator_modulo: Some(99), - }; - let duty2 = ValidatorDuty { - block_proposal_slots: Some(vec![Slot::new(42), Slot::new(45)]), - ..duty1.clone() - }; - assert_ne!(duty1, duty2); - assert!(duty1.eq_ignoring_proposal_slots(&duty2)); - assert!(duty2.eq_ignoring_proposal_slots(&duty1)); - } -} diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml new file mode 100644 index 00000000000..292c84bd8a7 --- /dev/null +++ b/common/warp_utils/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "warp_utils" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +warp = "0.2.5" +eth2 = { path = "../eth2" } +types = { path = "../../consensus/types" } +beacon_chain = { path = "../../beacon_node/beacon_chain" } +state_processing = { path = "../../consensus/state_processing" } diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs new file mode 100644 index 00000000000..ec9cf3c3442 --- /dev/null +++ b/common/warp_utils/src/lib.rs @@ -0,0 +1,5 @@ +//! This crate contains functions that are common across multiple `warp` HTTP servers in the +//! Lighthouse project. E.g., the `http_api` and `http_metrics` crates. 
+ +pub mod reject; +pub mod reply; diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs new file mode 100644 index 00000000000..a48ad56a855 --- /dev/null +++ b/common/warp_utils/src/reject.rs @@ -0,0 +1,147 @@ +use eth2::types::ErrorMessage; +use std::convert::Infallible; +use warp::{http::StatusCode, reject::Reject}; + +#[derive(Debug)] +pub struct BeaconChainError(pub beacon_chain::BeaconChainError); + +impl Reject for BeaconChainError {} + +pub fn beacon_chain_error(e: beacon_chain::BeaconChainError) -> warp::reject::Rejection { + warp::reject::custom(BeaconChainError(e)) +} + +#[derive(Debug)] +pub struct BeaconStateError(pub types::BeaconStateError); + +impl Reject for BeaconStateError {} + +pub fn beacon_state_error(e: types::BeaconStateError) -> warp::reject::Rejection { + warp::reject::custom(BeaconStateError(e)) +} + +#[derive(Debug)] +pub struct SlotProcessingError(pub state_processing::SlotProcessingError); + +impl Reject for SlotProcessingError {} + +pub fn slot_processing_error(e: state_processing::SlotProcessingError) -> warp::reject::Rejection { + warp::reject::custom(SlotProcessingError(e)) +} + +#[derive(Debug)] +pub struct BlockProductionError(pub beacon_chain::BlockProductionError); + +impl Reject for BlockProductionError {} + +pub fn block_production_error(e: beacon_chain::BlockProductionError) -> warp::reject::Rejection { + warp::reject::custom(BlockProductionError(e)) +} + +#[derive(Debug)] +pub struct CustomNotFound(pub String); + +impl Reject for CustomNotFound {} + +pub fn custom_not_found(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomNotFound(msg)) +} + +#[derive(Debug)] +pub struct CustomBadRequest(pub String); + +impl Reject for CustomBadRequest {} + +pub fn custom_bad_request(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomBadRequest(msg)) +} + +#[derive(Debug)] +pub struct CustomServerError(pub String); + +impl Reject for CustomServerError {} + +pub fn 
custom_server_error(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomServerError(msg)) +} + +#[derive(Debug)] +pub struct BroadcastWithoutImport(pub String); + +impl Reject for BroadcastWithoutImport {} + +pub fn broadcast_without_import(msg: String) -> warp::reject::Rejection { + warp::reject::custom(BroadcastWithoutImport(msg)) +} + +#[derive(Debug)] +pub struct ObjectInvalid(pub String); + +impl Reject for ObjectInvalid {} + +pub fn object_invalid(msg: String) -> warp::reject::Rejection { + warp::reject::custom(ObjectInvalid(msg)) +} + +/// This function receives a `Rejection` and tries to return a custom +/// value, otherwise simply passes the rejection along. +pub async fn handle_rejection(err: warp::Rejection) -> Result { + let code; + let message; + + if err.is_not_found() { + code = StatusCode::NOT_FOUND; + message = "NOT_FOUND".to_string(); + } else if let Some(e) = err.find::() { + message = format!("BAD_REQUEST: body deserialize error: {}", e); + code = StatusCode::BAD_REQUEST; + } else if let Some(e) = err.find::() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: invalid query: {}", e); + } else if let Some(e) = err.find::() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::() { + code = StatusCode::NOT_FOUND; + message = format!("NOT_FOUND: {}", e.0); + } else if let Some(e) = err.find::() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: {}", e.0); + } else if let Some(e) = err.find::() { + code = 
StatusCode::INTERNAL_SERVER_ERROR; + message = format!("INTERNAL_SERVER_ERROR: {}", e.0); + } else if let Some(e) = err.find::() { + code = StatusCode::ACCEPTED; + message = format!( + "ACCEPTED: the object was broadcast to the network without being \ + fully imported to the local database: {}", + e.0 + ); + } else if let Some(e) = err.find::() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: Invalid object: {}", e.0); + } else if err.find::().is_some() { + code = StatusCode::METHOD_NOT_ALLOWED; + message = "METHOD_NOT_ALLOWED".to_string(); + } else { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = "UNHANDLED_REJECTION".to_string(); + } + + let json = warp::reply::json(&ErrorMessage { + code: code.as_u16(), + message, + stacktraces: vec![], + }); + + Ok(warp::reply::with_status(json, code)) +} diff --git a/common/warp_utils/src/reply.rs b/common/warp_utils/src/reply.rs new file mode 100644 index 00000000000..dcec6214f0c --- /dev/null +++ b/common/warp_utils/src/reply.rs @@ -0,0 +1,15 @@ +/// Add CORS headers to `reply` only if `allow_origin.is_some()`. 
+pub fn maybe_cors( + reply: T, + allow_origin: Option<&String>, +) -> Box { + if let Some(allow_origin) = allow_origin { + Box::new(warp::reply::with_header( + reply, + "Access-Control-Allow-Origin", + allow_origin, + )) + } else { + Box::new(reply) + } +} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 99f998e5584..8be0b328d1f 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -4,7 +4,7 @@ use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; use types::{ BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, - IndexedAttestation, Slot, + IndexedAttestation, RelativeEpoch, ShufflingId, Slot, }; use crate::ForkChoiceStore; @@ -534,6 +534,10 @@ where root: block_root, parent_root: Some(block.parent_root), target_root, + current_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Current) + .map_err(Error::BeaconStateError)?, + next_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Next) + .map_err(Error::BeaconStateError)?, state_root: block.state_root, justified_epoch: state.current_justified_checkpoint.epoch, finalized_epoch: state.finalized_checkpoint.epoch, diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 78c7534cde9..7b508afd49c 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -6,3 +6,4 @@ pub use crate::fork_choice::{ SAFE_SLOTS_TO_UPDATE_JUSTIFIED, }; pub use fork_choice_store::ForkChoiceStore; +pub use proto_array::Block as ProtoBlock; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ffa9cbe6bd4..86fbbd8ec9e 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -351,7 +351,7 @@ impl ForkChoiceTest { let mut verified_attestation = self .harness .chain - 
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) .expect("precondition: should gossip verify attestation"); if let MutationDelay::Blocks(slots) = delay { diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 6e1bd970b00..cd38d017c90 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -4,7 +4,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice}; use serde_derive::{Deserialize, Serialize}; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; pub use ffg_updates::*; pub use no_votes::*; @@ -125,6 +125,14 @@ impl ForkChoiceTestDefinition { parent_root: Some(parent_root), state_root: Hash256::zero(), target_root: Hash256::zero(), + current_epoch_shuffling_id: ShufflingId::from_components( + Epoch::new(0), + Hash256::zero(), + ), + next_epoch_shuffling_id: ShufflingId::from_components( + Epoch::new(0), + Hash256::zero(), + ), justified_epoch, finalized_epoch, }; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 18db8d340ce..c89a96628a7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -2,7 +2,7 @@ use crate::{error::Error, Block}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; #[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] pub struct ProtoNode { @@ -18,6 +18,8 @@ pub struct ProtoNode { /// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely fork choice attestation verification). 
pub target_root: Hash256, + pub current_epoch_shuffling_id: ShufflingId, + pub next_epoch_shuffling_id: ShufflingId, pub root: Hash256, pub parent: Option, pub justified_epoch: Epoch, @@ -142,6 +144,8 @@ impl ProtoArray { slot: block.slot, root: block.root, target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, state_root: block.state_root, parent: block .parent_root diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 05f6c5ec4de..aa8835349f9 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -4,7 +4,7 @@ use crate::ssz_container::SszContainer; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -24,6 +24,8 @@ pub struct Block { pub parent_root: Option, pub state_root: Hash256, pub target_root: Hash256, + pub current_epoch_shuffling_id: ShufflingId, + pub next_epoch_shuffling_id: ShufflingId, pub justified_epoch: Epoch, pub finalized_epoch: Epoch, } @@ -86,6 +88,12 @@ impl ProtoArrayForkChoice { // We are using the finalized_root as the target_root, since it always lies on an // epoch boundary. target_root: finalized_root, + // TODO: explain why this is safe. 
+ current_epoch_shuffling_id: ShufflingId::from_components( + finalized_epoch, + finalized_root, + ), + next_epoch_shuffling_id: ShufflingId::from_components(finalized_epoch, finalized_root), justified_epoch, finalized_epoch, }; @@ -193,6 +201,8 @@ impl ProtoArrayForkChoice { parent_root, state_root: block.state_root, target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), justified_epoch: block.justified_epoch, finalized_epoch: block.finalized_epoch, }) @@ -340,6 +350,7 @@ mod test_compute_deltas { let finalized_desc = Hash256::from_low_u64_be(2); let not_finalized_desc = Hash256::from_low_u64_be(3); let unknown = Hash256::from_low_u64_be(4); + let shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fc = ProtoArrayForkChoice::new( genesis_slot, @@ -358,6 +369,8 @@ mod test_compute_deltas { parent_root: Some(finalized_root), state_root, target_root: finalized_root, + current_epoch_shuffling_id: shuffling_id.clone(), + next_epoch_shuffling_id: shuffling_id.clone(), justified_epoch: genesis_epoch, finalized_epoch: genesis_epoch, }) @@ -371,6 +384,8 @@ mod test_compute_deltas { parent_root: None, state_root, target_root: finalized_root, + current_epoch_shuffling_id: shuffling_id.clone(), + next_epoch_shuffling_id: shuffling_id.clone(), justified_epoch: genesis_epoch, finalized_epoch: genesis_epoch, }) diff --git a/consensus/serde_hex/Cargo.toml b/consensus/serde_hex/Cargo.toml deleted file mode 100644 index 2df5ff02a08..00000000000 --- a/consensus/serde_hex/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "serde_hex" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -serde = "1.0.110" -hex = "0.4.2" diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 1fb35736baf..8c0013562c1 100644 --- a/consensus/serde_utils/Cargo.toml +++ 
b/consensus/serde_utils/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] serde = { version = "1.0.110", features = ["derive"] } serde_derive = "1.0.110" +hex = "0.4.2" [dev-dependencies] serde_json = "1.0.52" diff --git a/consensus/serde_utils/src/bytes_4_hex.rs b/consensus/serde_utils/src/bytes_4_hex.rs new file mode 100644 index 00000000000..8e5ab915534 --- /dev/null +++ b/consensus/serde_utils/src/bytes_4_hex.rs @@ -0,0 +1,44 @@ +//! Formats `[u8; 4]` as a 0x-prefixed hex string. +//! +//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. + +use serde::de::Error; +use serde::{Deserialize, Deserializer, Serializer}; + +const BYTES_LEN: usize = 4; + +pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + let mut array = [0 as u8; BYTES_LEN]; + + let start = s + .as_str() + .get(2..) + .ok_or_else(|| D::Error::custom("string length too small"))?; + let decoded: Vec = hex::decode(&start).map_err(D::Error::custom)?; + + if decoded.len() != BYTES_LEN { + return Err(D::Error::custom("Fork length too long")); + } + + for (i, item) in array.iter_mut().enumerate() { + if i > decoded.len() { + break; + } + *item = decoded[i]; + } + Ok(array) +} diff --git a/consensus/serde_hex/src/lib.rs b/consensus/serde_utils/src/hex.rs similarity index 81% rename from consensus/serde_hex/src/lib.rs rename to consensus/serde_utils/src/hex.rs index db84222757d..79dfaa506b8 100644 --- a/consensus/serde_hex/src/lib.rs +++ b/consensus/serde_utils/src/hex.rs @@ -1,6 +1,9 @@ +//! Provides utilities for parsing 0x-prefixed hex strings. + use serde::de::{self, Visitor}; use std::fmt; +/// Encode `data` as a 0x-prefixed hex string. 
pub fn encode>(data: T) -> String { let hex = hex::encode(data); let mut s = "0x".to_string(); @@ -8,6 +11,15 @@ pub fn encode>(data: T) -> String { s } +/// Decode `data` from a 0x-prefixed hex string. +pub fn decode(s: &str) -> Result, String> { + if s.starts_with("0x") { + hex::decode(&s[2..]).map_err(|e| format!("invalid hex: {:?}", e)) + } else { + Err("hex must have 0x prefix".to_string()) + } +} + pub struct PrefixedHexVisitor; impl<'de> Visitor<'de> for PrefixedHexVisitor { diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index df2b44b6243..0016e67a3db 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -1,2 +1,9 @@ -pub mod quoted_u64; +mod quoted_int; + +pub mod bytes_4_hex; +pub mod hex; pub mod quoted_u64_vec; +pub mod u32_hex; +pub mod u8_hex; + +pub use quoted_int::{quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs new file mode 100644 index 00000000000..24edf1ebee2 --- /dev/null +++ b/consensus/serde_utils/src/quoted_int.rs @@ -0,0 +1,144 @@ +//! Formats some integer types using quotes. +//! +//! E.g., `1` serializes as `"1"`. +//! +//! Quotes can be optional during decoding. + +use serde::{Deserializer, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use std::convert::TryFrom; +use std::marker::PhantomData; + +macro_rules! define_mod { + ($int: ty, $visit_fn: ident) => { + /// Serde support for deserializing quoted integers. + /// + /// Configurable so that quotes are either required or optional. 
+ pub struct QuotedIntVisitor { + require_quotes: bool, + _phantom: PhantomData, + } + + impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor + where + T: From<$int> + Into<$int> + Copy + TryFrom, + { + type Value = T; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + if self.require_quotes { + write!(formatter, "a quoted integer") + } else { + write!(formatter, "a quoted or unquoted integer") + } + } + + fn visit_str(self, s: &str) -> Result + where + E: serde::de::Error, + { + s.parse::<$int>() + .map(T::from) + .map_err(serde::de::Error::custom) + } + + fn visit_u64(self, v: u64) -> Result + where + E: serde::de::Error, + { + if self.require_quotes { + Err(serde::de::Error::custom( + "received unquoted integer when quotes are required", + )) + } else { + T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) + } + } + } + + /// Wrapper type for requiring quotes on a `$int`-like type. + /// + /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested + /// inside types like `Option`, `Result` and `Vec`. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] + #[serde(transparent)] + pub struct Quoted + where + T: From<$int> + Into<$int> + Copy + TryFrom, + { + #[serde(with = "require_quotes")] + pub value: T, + } + + /// Serialize with quotes. + pub fn serialize(value: &T, serializer: S) -> Result + where + S: Serializer, + T: From<$int> + Into<$int> + Copy, + { + let v: $int = (*value).into(); + serializer.serialize_str(&format!("{}", v)) + } + + /// Deserialize with or without quotes. + pub fn deserialize<'de, D, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + T: From<$int> + Into<$int> + Copy + TryFrom, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: false, + _phantom: PhantomData, + }) + } + + /// Requires quotes when deserializing. + /// + /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
+ pub mod require_quotes { + pub use super::serialize; + use super::*; + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + T: From<$int> + Into<$int> + Copy + TryFrom, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: true, + _phantom: PhantomData, + }) + } + } + + #[cfg(test)] + mod test { + use super::*; + + #[test] + fn require_quotes() { + let x = serde_json::from_str::>("\"8\"").unwrap(); + assert_eq!(x.value, 8); + serde_json::from_str::>("8").unwrap_err(); + } + } + }; +} + +pub mod quoted_u8 { + use super::*; + + define_mod!(u8, visit_u8); +} + +pub mod quoted_u32 { + use super::*; + + define_mod!(u32, visit_u32); +} + +pub mod quoted_u64 { + use super::*; + + define_mod!(u64, visit_u64); +} diff --git a/consensus/serde_utils/src/quoted_u64.rs b/consensus/serde_utils/src/quoted_u64.rs deleted file mode 100644 index 2e73a104f19..00000000000 --- a/consensus/serde_utils/src/quoted_u64.rs +++ /dev/null @@ -1,115 +0,0 @@ -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; - -/// Serde support for deserializing quoted integers. -/// -/// Configurable so that quotes are either required or optional. 
-pub struct QuotedIntVisitor { - require_quotes: bool, - _phantom: PhantomData, -} - -impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor -where - T: From + Into + Copy, -{ - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - s.parse::() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64(self, v: u64) -> Result - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - Ok(T::from(v)) - } - } -} - -/// Wrapper type for requiring quotes on a `u64`-like type. -/// -/// Unlike using `serde(with = "quoted_u64::require_quotes")` this is composable, and can be nested -/// inside types like `Option`, `Result` and `Vec`. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] -#[serde(transparent)] -pub struct Quoted -where - T: From + Into + Copy, -{ - #[serde(with = "require_quotes")] - pub value: T, -} - -/// Serialize with quotes. -pub fn serialize(value: &T, serializer: S) -> Result -where - S: Serializer, - T: From + Into + Copy, -{ - let v: u64 = (*value).into(); - serializer.serialize_str(&format!("{}", v)) -} - -/// Deserialize with or without quotes. -pub fn deserialize<'de, D, T>(deserializer: D) -> Result -where - D: Deserializer<'de>, - T: From + Into + Copy, -{ - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) -} - -/// Requires quotes when deserializing. -/// -/// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
-pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From + Into + Copy, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::>("8").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs index c5badee5012..f124c989092 100644 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ b/consensus/serde_utils/src/quoted_u64_vec.rs @@ -1,3 +1,9 @@ +//! Formats `Vec` using quotes. +//! +//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. +//! +//! Quotes can be optional during decoding. + use serde::ser::SerializeSeq; use serde::{Deserializer, Serializer}; use serde_derive::{Deserialize, Serialize}; @@ -6,7 +12,7 @@ use serde_derive::{Deserialize, Serialize}; #[serde(transparent)] pub struct QuotedIntWrapper { #[serde(with = "crate::quoted_u64")] - int: u64, + pub int: u64, } pub struct QuotedIntVecVisitor; diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs new file mode 100644 index 00000000000..d39732ac903 --- /dev/null +++ b/consensus/serde_utils/src/u32_hex.rs @@ -0,0 +1,32 @@ +//! Formats `u32` as a 0x-prefixed, little-endian hex string. +//! +//! E.g., `0` serializes as `"0x00000000"`. 
+ +use serde::de::Error; +use serde::{Deserialize, Deserializer, Serializer}; + +pub fn serialize(num: &u32, serializer: S) -> Result +where + S: Serializer, +{ + let mut hex: String = "0x".to_string(); + let bytes = num.to_le_bytes(); + hex.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + let start = s + .as_str() + .get(2..) + .ok_or_else(|| D::Error::custom("string length too small"))?; + + u32::from_str_radix(&start, 16) + .map_err(D::Error::custom) + .map(u32::from_be) +} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs new file mode 100644 index 00000000000..7f8635579f4 --- /dev/null +++ b/consensus/serde_utils/src/u8_hex.rs @@ -0,0 +1,29 @@ +//! Formats `u8` as a 0x-prefixed hex string. +//! +//! E.g., `0` serializes as `"0x00"`. + +use serde::de::Error; +use serde::{Deserialize, Deserializer, Serializer}; + +pub fn serialize(byte: &u8, serializer: S) -> Result +where + S: Serializer, +{ + let mut hex: String = "0x".to_string(); + hex.push_str(&hex::encode(&[*byte])); + + serializer.serialize_str(&hex) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + + let start = match s.as_str().get(2..) 
{ + Some(start) => start, + None => return Err(D::Error::custom("string length too small")), + }; + u8::from_str_radix(&start, 16).map_err(D::Error::custom) +} diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 144b3ce31fd..ca6a5adbe81 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -11,7 +11,7 @@ name = "ssz_types" tree_hash = "0.1.0" serde = "1.0.110" serde_derive = "1.0.110" -serde_hex = { path = "../serde_hex" } +serde_utils = { path = "../serde_utils" } eth2_ssz = "0.1.2" typenum = "1.12.0" arbitrary = { version = "0.4.4", features = ["derive"], optional = true } diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 1b6dce3ec0f..09fa9fc2df4 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -3,7 +3,7 @@ use crate::Error; use core::marker::PhantomData; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use ssz::{Decode, Encode}; use tree_hash::Hash256; use typenum::Unsigned; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index d893ff3ad03..87f4aea4879 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -40,6 +40,8 @@ tempfile = "3.1.0" derivative = "2.1.1" rusqlite = { version = "0.23.1", features = ["bundled"], optional = true } arbitrary = { version = "0.4.4", features = ["derive"], optional = true } +serde_utils = { path = "../serde_utils" } +regex = "1.3.9" [dev-dependencies] serde_json = "1.0.52" diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 737c891c9fb..52871226107 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: 
EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. pub aggregate: Attestation, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 67fb280025c..07fa529e0ff 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -26,6 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index c32e4683e18..613d7fd1c88 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,4 +12,7 @@ pub struct AttestationDuty { pub committee_position: usize, /// The total number of attesters in the committee. pub committee_len: usize, + /// The committee count at `attestation_slot`. 
+ #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index eeb10458bfa..d3a9160709c 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct BeaconBlock { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 489c5bc9d77..ef28307edcc 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str, Graffiti}; use crate::*; use serde_derive::{Deserialize, Serialize}; @@ -17,10 +16,6 @@ use tree_hash_derive::TreeHash; pub struct BeaconBlockBody { pub randao_reveal: Signature, pub eth1_data: Eth1Data, - #[serde( - serialize_with = "graffiti_to_hex_str", - deserialize_with = "graffiti_from_hex_str" - )] pub graffiti: Graffiti, pub proposer_slashings: VariableList, pub attester_slashings: VariableList, T::MaxAttesterSlashings>, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 04a20e56d3f..708c0e16fe7 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -14,6 +14,7 @@ use tree_hash_derive::TreeHash; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockHeader { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 9594a22175d..29528324b98 100644 --- 
a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -157,6 +157,7 @@ where T: EthSpec, { // Versioning + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, pub slot: Slot, @@ -173,6 +174,7 @@ where // Ethereum 1.0 chain data pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList, + #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry @@ -907,6 +909,13 @@ impl BeaconState { self.exit_cache = ExitCache::default(); } + /// Returns `true` if the committee cache for `relative_epoch` is built and ready to use. + pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool { + let i = Self::committee_cache_index(relative_epoch); + + self.committee_caches[i].is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) + } + /// Build an epoch cache, unless it is has already been built. pub fn build_committee_cache( &mut self, diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index f71ad2e893e..a6018df304c 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -185,6 +185,7 @@ impl CommitteeCache { index, committee_position, committee_len, + committees_at_slot: self.committees_per_slot(), }) }) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 5afe7db8fdd..8fcaa1eb7d2 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -4,10 +4,6 @@ use serde_derive::{Deserialize, Serialize}; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; -use utils::{ - fork_from_hex_str, fork_to_hex_str, u32_from_hex_str, u32_to_hex_str, u8_from_hex_str, - u8_to_hex_str, -}; /// Each of the BLS signature domains. 
/// @@ -64,12 +60,9 @@ pub struct ChainSpec { /* * Initial Values */ - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], - #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] + #[serde(with = "serde_utils::u8_hex")] pub bls_withdrawal_prefix_byte: u8, /* @@ -114,6 +107,7 @@ pub struct ChainSpec { */ pub eth1_follow_distance: u64, pub seconds_per_eth1_block: u64, + pub deposit_contract_address: Address, /* * Networking @@ -324,6 +318,9 @@ impl ChainSpec { */ eth1_follow_distance: 1_024, seconds_per_eth1_block: 14, + deposit_contract_address: "1234567890123456789012345678901234567890" + .parse() + .expect("chain spec deposit contract address"), /* * Network specific @@ -449,108 +446,132 @@ mod tests { #[serde(default)] pub struct YamlConfig { // ChainSpec + #[serde(with = "serde_utils::quoted_u64")] far_future_epoch: u64, + #[serde(with = "serde_utils::quoted_u64")] base_rewards_per_epoch: u64, + #[serde(with = "serde_utils::quoted_u64")] deposit_contract_tree_depth: u64, - max_committees_per_slot: usize, - target_committee_size: usize, + #[serde(with = "serde_utils::quoted_u64")] + max_committees_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] + target_committee_size: u64, + #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, + #[serde(with = "serde_utils::quoted_u8")] shuffle_round_count: u8, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_time: u64, + #[serde(with = "serde_utils::quoted_u64")] genesis_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] min_deposit_amount: u64, + #[serde(with = "serde_utils::quoted_u64")] max_effective_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] ejection_balance: 
u64, + #[serde(with = "serde_utils::quoted_u64")] effective_balance_increment: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_downward_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_upward_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] genesis_slot: u64, - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], - #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] + #[serde(with = "serde_utils::u8_hex")] bls_withdrawal_prefix: u8, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] min_attestation_inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] min_seed_lookahead: u64, + #[serde(with = "serde_utils::quoted_u64")] max_seed_lookahead: u64, + #[serde(with = "serde_utils::quoted_u64")] min_epochs_to_inactivity_penalty: u64, + #[serde(with = "serde_utils::quoted_u64")] min_validator_withdrawability_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] shard_committee_period: u64, + #[serde(with = "serde_utils::quoted_u64")] base_reward_factor: u64, + #[serde(with = "serde_utils::quoted_u64")] whistleblower_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] proposer_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] inactivity_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] min_slashing_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] safe_slots_to_update_justified: u64, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_beacon_proposer: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = 
"serde_utils::u32_hex")] domain_beacon_attester: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_randao: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_deposit: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_voluntary_exit: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_selection_proof: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_aggregate_and_proof: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] // EthSpec + #[serde(with = "serde_utils::u32_hex")] justification_bits_length: u32, + #[serde(with = "serde_utils::quoted_u32")] max_validators_per_committee: u32, genesis_epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] slots_per_epoch: u64, + #[serde(with = "serde_utils::quoted_u64")] epochs_per_eth1_voting_period: u64, - slots_per_historical_root: usize, - epochs_per_historical_vector: usize, - epochs_per_slashings_vector: usize, + #[serde(with = "serde_utils::quoted_u64")] + slots_per_historical_root: u64, + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_historical_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_slashings_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] historical_roots_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] validator_registry_limit: u64, + #[serde(with = "serde_utils::quoted_u32")] max_proposer_slashings: u32, + #[serde(with = "serde_utils::quoted_u32")] max_attester_slashings: u32, + #[serde(with = "serde_utils::quoted_u32")] 
max_attestations: u32, + #[serde(with = "serde_utils::quoted_u32")] max_deposits: u32, + #[serde(with = "serde_utils::quoted_u32")] max_voluntary_exits: u32, // Validator + #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, + #[serde(with = "serde_utils::quoted_u64")] target_aggregators_per_committee: u64, + #[serde(with = "serde_utils::quoted_u64")] random_subnets_per_validator: u64, + #[serde(with = "serde_utils::quoted_u64")] epochs_per_random_subnet_subscription: u64, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_eth1_block: u64, + deposit_contract_address: Address, } impl Default for YamlConfig { @@ -569,8 +590,8 @@ impl YamlConfig { far_future_epoch: spec.far_future_epoch.into(), base_rewards_per_epoch: spec.base_rewards_per_epoch, deposit_contract_tree_depth: spec.deposit_contract_tree_depth, - max_committees_per_slot: spec.max_committees_per_slot, - target_committee_size: spec.target_committee_size, + max_committees_per_slot: spec.max_committees_per_slot as u64, + target_committee_size: spec.target_committee_size as u64, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, churn_limit_quotient: spec.churn_limit_quotient, shuffle_round_count: spec.shuffle_round_count, @@ -614,9 +635,9 @@ impl YamlConfig { genesis_epoch: T::genesis_epoch(), slots_per_epoch: T::slots_per_epoch(), epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(), - slots_per_historical_root: T::slots_per_historical_root(), - epochs_per_historical_vector: T::epochs_per_historical_vector(), - epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_usize(), + slots_per_historical_root: T::slots_per_historical_root() as u64, + epochs_per_historical_vector: T::epochs_per_historical_vector() as u64, + epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_u64(), historical_roots_limit: T::HistoricalRootsLimit::to_u64(), validator_registry_limit: T::ValidatorRegistryLimit::to_u64(), max_proposer_slashings: 
T::MaxProposerSlashings::to_u32(), @@ -631,6 +652,7 @@ impl YamlConfig { random_subnets_per_validator: spec.random_subnets_per_validator, epochs_per_random_subnet_subscription: spec.epochs_per_random_subnet_subscription, seconds_per_eth1_block: spec.seconds_per_eth1_block, + deposit_contract_address: spec.deposit_contract_address, } } @@ -648,9 +670,9 @@ impl YamlConfig { || self.genesis_epoch != T::genesis_epoch() || self.slots_per_epoch != T::slots_per_epoch() || self.epochs_per_eth1_voting_period != T::EpochsPerEth1VotingPeriod::to_u64() - || self.slots_per_historical_root != T::slots_per_historical_root() - || self.epochs_per_historical_vector != T::epochs_per_historical_vector() - || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_usize() + || self.slots_per_historical_root as usize != T::slots_per_historical_root() + || self.epochs_per_historical_vector as usize != T::epochs_per_historical_vector() + || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_u64() || self.historical_roots_limit != T::HistoricalRootsLimit::to_u64() || self.validator_registry_limit != T::ValidatorRegistryLimit::to_u64() || self.max_proposer_slashings != T::MaxProposerSlashings::to_u32() @@ -667,7 +689,7 @@ impl YamlConfig { far_future_epoch: Epoch::from(self.far_future_epoch), base_rewards_per_epoch: self.base_rewards_per_epoch, deposit_contract_tree_depth: self.deposit_contract_tree_depth, - target_committee_size: self.target_committee_size, + target_committee_size: self.target_committee_size as usize, min_per_epoch_churn_limit: self.min_per_epoch_churn_limit, churn_limit_quotient: self.churn_limit_quotient, shuffle_round_count: self.shuffle_round_count, @@ -705,6 +727,8 @@ impl YamlConfig { boot_nodes: chain_spec.boot_nodes.clone(), genesis_fork_version: self.genesis_fork_version, eth1_follow_distance: self.eth1_follow_distance, + seconds_per_eth1_block: self.seconds_per_eth1_block, + deposit_contract_address: self.deposit_contract_address, 
..*chain_spec }) } diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index ce72c362e21..8e2050a0b83 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub signature: SignatureBytes, } diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index fe283a17f93..92f6b66bf7f 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index e10744368f6..008b7933fff 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::Epoch; use serde_derive::{Deserialize, Serialize}; @@ -16,15 +15,9 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct EnrForkId { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index dcc1ea09819..e3b74cc491c 100644 --- a/consensus/types/src/eth1_data.rs +++ 
b/consensus/types/src/eth1_data.rs @@ -26,6 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct Eth1Data { pub deposit_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub block_hash: Hash256, } diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 8e95710c4ad..b129271ba0f 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::Epoch; use serde_derive::{Deserialize, Serialize}; @@ -25,15 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index bad6f6219e2..092102f779e 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::{Hash256, SignedRoot}; use serde_derive::{Deserialize, Serialize}; @@ -15,10 +14,7 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct ForkData { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub genesis_validators_root: Hash256, } diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs index 6215fb0cd7d..79bc149e43e 100644 --- a/consensus/types/src/free_attestation.rs +++ 
b/consensus/types/src/free_attestation.rs @@ -9,5 +9,6 @@ use serde_derive::Serialize; pub struct FreeAttestation { pub data: AttestationData, pub signature: Signature, + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs new file mode 100644 index 00000000000..f35df93838b --- /dev/null +++ b/consensus/types/src/graffiti.rs @@ -0,0 +1,132 @@ +use crate::{ + test_utils::{RngCore, TestRandom}, + Hash256, +}; +use regex::bytes::Regex; +use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt; +use tree_hash::TreeHash; + +pub const GRAFFITI_BYTES_LEN: usize = 32; + +/// The 32-byte `graffiti` field on a beacon block. +#[derive(Default, Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); + +impl Graffiti { + pub fn as_utf8_lossy(&self) -> String { + #[allow(clippy::invalid_regex)] + let re = Regex::new("\\p{C}").expect("graffiti regex is valid"); + String::from_utf8_lossy(&re.replace_all(&self.0[..], &b""[..])).to_string() + } +} + +impl fmt::Display for Graffiti { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(&self.0)) + } +} + +impl From<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { + fn from(bytes: [u8; GRAFFITI_BYTES_LEN]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { + fn into(self) -> [u8; GRAFFITI_BYTES_LEN] { + self.0 + } +} + +pub mod serde_graffiti { + use super::*; + + pub fn serialize(bytes: &[u8; GRAFFITI_BYTES_LEN], serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&serde_utils::hex::encode(bytes)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], 
D::Error> + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + + let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + + if bytes.len() != GRAFFITI_BYTES_LEN { + return Err(D::Error::custom(format!( + "incorrect byte length {}, expected {}", + bytes.len(), + GRAFFITI_BYTES_LEN + ))); + } + + let mut array = [0; GRAFFITI_BYTES_LEN]; + array[..].copy_from_slice(&bytes); + + Ok(array) + } +} + +impl Encode for Graffiti { + fn is_ssz_fixed_len() -> bool { + <[u8; GRAFFITI_BYTES_LEN] as Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; GRAFFITI_BYTES_LEN] as Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for Graffiti { + fn is_ssz_fixed_len() -> bool { + <[u8; GRAFFITI_BYTES_LEN] as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; GRAFFITI_BYTES_LEN] as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + <[u8; GRAFFITI_BYTES_LEN]>::from_ssz_bytes(bytes).map(Self) + } +} + +impl TreeHash for Graffiti { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; GRAFFITI_BYTES_LEN]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; GRAFFITI_BYTES_LEN]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for Graffiti { + fn random_for_test(rng: &mut impl RngCore) -> Self { + Self::from(Hash256::random_for_test(rng).to_fixed_bytes()) + } +} diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 341db180750..eaae75de839 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -18,6 +18,7 @@ use 
tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. + #[serde(with = "quoted_variable_list_u64")] pub attesting_indices: VariableList, pub data: AttestationData, pub signature: AggregateSignature, @@ -53,6 +54,43 @@ impl Hash for IndexedAttestation { } } +/// Serialize a variable list of `u64` such that each int is quoted. Deserialize a variable +/// list supporting both quoted and un-quoted ints. +/// +/// E.g.,`["0", "1", "2"]` +mod quoted_variable_list_u64 { + use super::*; + use crate::Unsigned; + use serde::ser::SerializeSeq; + use serde::{Deserializer, Serializer}; + use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; + + pub fn serialize(value: &VariableList, serializer: S) -> Result + where + S: Serializer, + T: Unsigned, + { + let mut seq = serializer.serialize_seq(Some(value.len()))?; + for &int in value.iter() { + seq.serialize_element(&QuotedIntWrapper { int })?; + } + seq.end() + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + T: Unsigned, + { + deserializer + .deserialize_any(QuotedIntVecVisitor) + .and_then(|vec| { + VariableList::new(vec) + .map_err(|e| serde::de::Error::custom(format!("invalid length: {:?}", e))) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 19697118a50..464a00825f1 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -29,19 +29,22 @@ pub mod eth_spec; pub mod fork; pub mod fork_data; pub mod free_attestation; +pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; pub mod pending_attestation; pub mod proposer_slashing; pub mod relative_epoch; pub mod selection_proof; +pub mod serde_utils; +pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod 
signed_voluntary_exit; pub mod signing_data; -pub mod utils; pub mod validator; +pub mod validator_subscription; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; @@ -74,12 +77,14 @@ pub use crate::eth1_data::Eth1Data; pub use crate::fork::Fork; pub use crate::fork_data::ForkData; pub use crate::free_attestation::FreeAttestation; +pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; +pub use crate::shuffling_id::ShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; @@ -88,6 +93,7 @@ pub use crate::signing_data::{SignedRoot, SigningData}; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::subnet_id::SubnetId; pub use crate::validator::Validator; +pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; pub type CommitteeIndex = u64; @@ -99,4 +105,3 @@ pub use bls::{ AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; -pub use utils::{Graffiti, GRAFFITI_BYTES_LEN}; diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 70ebb1bbd68..f4b0fd9b148 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -13,7 +13,9 @@ use tree_hash_derive::TreeHash; pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, + #[serde(with = 
"serde_utils::quoted_u64")] pub inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, } diff --git a/consensus/types/src/serde_utils.rs b/consensus/types/src/serde_utils.rs new file mode 100644 index 00000000000..85639577083 --- /dev/null +++ b/consensus/types/src/serde_utils.rs @@ -0,0 +1,149 @@ +use serde::de::Error; +use serde::{Deserialize, Deserializer, Serializer}; + +pub use serde_utils::*; + +pub const FORK_BYTES_LEN: usize = 4; +pub const GRAFFITI_BYTES_LEN: usize = 32; + +/// Type for a slice of `GRAFFITI_BYTES_LEN` bytes. +/// +/// Gets included inside each `BeaconBlockBody`. +pub type Graffiti = [u8; GRAFFITI_BYTES_LEN]; + +pub mod u32_hex { + use super::*; + + pub fn serialize(num: &u32, serializer: S) -> Result + where + S: Serializer, + { + let mut hex: String = "0x".to_string(); + let bytes = num.to_le_bytes(); + hex.push_str(&::hex::encode(&bytes)); + + serializer.serialize_str(&hex) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + let start = s + .as_str() + .get(2..) + .ok_or_else(|| D::Error::custom("string length too small"))?; + + u32::from_str_radix(&start, 16) + .map_err(D::Error::custom) + .map(u32::from_be) + } +} + +pub mod u8_hex { + use super::*; + + pub fn serialize(byte: &u8, serializer: S) -> Result + where + S: Serializer, + { + let mut hex: String = "0x".to_string(); + hex.push_str(&::hex::encode(&[*byte])); + + serializer.serialize_str(&hex) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + + let start = match s.as_str().get(2..) 
{ + Some(start) => start, + None => return Err(D::Error::custom("string length too small")), + }; + u8::from_str_radix(&start, 16).map_err(D::Error::custom) + } +} + +pub mod fork_bytes_4 { + use super::*; + + pub fn serialize(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result + where + S: Serializer, + { + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&::hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error> + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + let mut array = [0 as u8; FORK_BYTES_LEN]; + + let start = s + .as_str() + .get(2..) + .ok_or_else(|| D::Error::custom("string length too small"))?; + let decoded: Vec = ::hex::decode(&start).map_err(D::Error::custom)?; + + if decoded.len() != FORK_BYTES_LEN { + return Err(D::Error::custom("Fork length too long")); + } + + for (i, item) in array.iter_mut().enumerate() { + if i > decoded.len() { + break; + } + *item = decoded[i]; + } + Ok(array) + } +} + +pub mod graffiti { + use super::*; + + pub fn serialize(bytes: &Graffiti, serializer: S) -> Result + where + S: Serializer, + { + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&::hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + let mut array = Graffiti::default(); + + let start = s + .as_str() + .get(2..) 
+ .ok_or_else(|| D::Error::custom("string length too small"))?; + let decoded: Vec = ::hex::decode(&start).map_err(D::Error::custom)?; + + if decoded.len() > GRAFFITI_BYTES_LEN { + return Err(D::Error::custom("Fork length too long")); + } + + for (i, item) in array.iter_mut().enumerate() { + if i > decoded.len() { + break; + } + *item = decoded[i]; + } + Ok(array) + } +} diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs new file mode 100644 index 00000000000..5903b549b11 --- /dev/null +++ b/consensus/types/src/shuffling_id.rs @@ -0,0 +1,64 @@ +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::hash::Hash; + +/// Can be used to key (ID) the shuffling in some chain, in some epoch. +/// +/// ## Reasoning +/// +/// We say that the ID of some shuffling is always equal to a 2-tuple: +/// +/// - The epoch for which the shuffling should be effective. +/// - A block root, where this is the root at the *last* slot of the penultimate epoch. I.e., the +/// final block which contributed a randao reveal to the seed for the shuffling. +/// +/// The struct stores exactly that 2-tuple. +#[derive(Debug, PartialEq, Clone, Hash, Serialize, Deserialize, Encode, Decode)] +pub struct ShufflingId { + pub shuffling_epoch: Epoch, + shuffling_decision_block: Hash256, +} + +impl ShufflingId { + /// Using the given `state`, return the shuffling id for the shuffling at the given + /// `relative_epoch`. + /// + /// The `block_root` provided should be either: + /// + /// - The root of the block which produced this state. + /// - If the state is from a skip slot, the root of the latest block in that state. + pub fn new( + block_root: Hash256, + state: &BeaconState, + relative_epoch: RelativeEpoch, + ) -> Result { + let shuffling_epoch = relative_epoch.into_epoch(state.current_epoch()); + + // Taking advantage of saturating subtraction on slot and epoch. 
+ // + // This is the final slot of the penultimate epoch. + let shuffling_decision_slot = + (state.current_epoch() - 1).start_slot(E::slots_per_epoch()) - 1; + + let shuffling_decision_block = if state.slot == shuffling_decision_slot { + block_root + } else { + *state.get_block_root(shuffling_decision_slot)? + }; + + Ok(Self { + shuffling_epoch, + shuffling_decision_block, + }) + } + + pub fn from_components(shuffling_epoch: Epoch, shuffling_decision_block: Hash256) -> Self { + Self { + shuffling_epoch, + shuffling_decision_block, + } + } +} + +impl Eq for ShufflingId {} diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/slot_epoch_macros.rs index 15263f654e8..183bf4ed72f 100644 --- a/consensus/types/src/slot_epoch_macros.rs +++ b/consensus/types/src/slot_epoch_macros.rs @@ -277,6 +277,18 @@ macro_rules! impl_hash { }; } +macro_rules! impl_from_str { + ($type: ident) => { + impl std::str::FromStr for $type { + type Err = std::num::ParseIntError; + + fn from_str(s: &str) -> Result<$type, Self::Err> { + u64::from_str(s).map($type) + } + } + }; +} + macro_rules! impl_common { ($type: ident) => { impl_from_into_u64!($type); @@ -288,6 +300,7 @@ macro_rules! 
impl_common { impl_debug!($type); impl_ssz!($type); impl_hash!($type); + impl_from_str!($type); }; } diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 80cc249776f..667e2c9b78e 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -6,7 +6,8 @@ use std::ops::{Deref, DerefMut}; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct SubnetId(u64); +#[serde(transparent)] +pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); impl SubnetId { pub fn new(id: u64) -> Self { diff --git a/consensus/types/src/utils.rs b/consensus/types/src/utils.rs deleted file mode 100644 index a527fc18fd1..00000000000 --- a/consensus/types/src/utils.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod serde_utils; - -pub use self::serde_utils::*; diff --git a/consensus/types/src/utils/serde_utils.rs b/consensus/types/src/utils/serde_utils.rs deleted file mode 100644 index 36b719646bf..00000000000 --- a/consensus/types/src/utils/serde_utils.rs +++ /dev/null @@ -1,134 +0,0 @@ -use serde::de::Error; -use serde::{Deserialize, Deserializer, Serializer}; - -pub const FORK_BYTES_LEN: usize = 4; -pub const GRAFFITI_BYTES_LEN: usize = 32; - -/// Type for a slice of `GRAFFITI_BYTES_LEN` bytes. -/// -/// Gets included inside each `BeaconBlockBody`. -pub type Graffiti = [u8; GRAFFITI_BYTES_LEN]; - -pub fn u8_from_hex_str<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - - let start = match s.as_str().get(2..) { - Some(start) => start, - None => return Err(D::Error::custom("string length too small")), - }; - u8::from_str_radix(&start, 16).map_err(D::Error::custom) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. 
-pub fn u8_to_hex_str(byte: &u8, serializer: S) -> Result -where - S: Serializer, -{ - let mut hex: String = "0x".to_string(); - hex.push_str(&hex::encode(&[*byte])); - - serializer.serialize_str(&hex) -} - -pub fn u32_from_hex_str<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let start = s - .as_str() - .get(2..) - .ok_or_else(|| D::Error::custom("string length too small"))?; - - u32::from_str_radix(&start, 16) - .map_err(D::Error::custom) - .map(u32::from_be) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `num` to be a ref. -pub fn u32_to_hex_str(num: &u32, serializer: S) -> Result -where - S: Serializer, -{ - let mut hex: String = "0x".to_string(); - let bytes = num.to_le_bytes(); - hex.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex) -} - -pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let mut array = [0 as u8; FORK_BYTES_LEN]; - - let start = s - .as_str() - .get(2..) 
- .ok_or_else(|| D::Error::custom("string length too small"))?; - let decoded: Vec = hex::decode(&start).map_err(D::Error::custom)?; - - if decoded.len() != FORK_BYTES_LEN { - return Err(D::Error::custom("Fork length too long")); - } - - for (i, item) in array.iter_mut().enumerate() { - if i > decoded.len() { - break; - } - *item = decoded[i]; - } - Ok(array) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] -pub fn fork_to_hex_str(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn graffiti_to_hex_str(bytes: &Graffiti, serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn graffiti_from_hex_str<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let mut array = Graffiti::default(); - - let start = s - .as_str() - .get(2..) - .ok_or_else(|| D::Error::custom("string length too small"))?; - let decoded: Vec = hex::decode(&start).map_err(D::Error::custom)?; - - if decoded.len() > GRAFFITI_BYTES_LEN { - return Err(D::Error::custom("Fork length too long")); - } - - for (i, item) in array.iter_mut().enumerate() { - if i > decoded.len() { - break; - } - *item = decoded[i]; - } - Ok(array) -} diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator_subscription.rs new file mode 100644 index 00000000000..fd48660c52b --- /dev/null +++ b/consensus/types/src/validator_subscription.rs @@ -0,0 +1,21 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; + +/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation +/// duties. 
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct ValidatorSubscription { + /// The validators index. + pub validator_index: u64, + /// The index of the committee within `slot` of which the validator is a member. Used by the + /// beacon node to quickly evaluate the associated `SubnetId`. + pub attestation_committee_index: CommitteeIndex, + /// The slot in which to subscribe. + pub slot: Slot, + /// Committee count at slot to subscribe. + pub committee_count_at_slot: u64, + /// If true, the validator is an aggregator and the beacon node should aggregate attestations + /// for this slot. + pub is_aggregator: bool, +} diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index a9509d7affa..c33ea7e79f7 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index d7459ebf497..cf6290278d8 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -11,7 +11,7 @@ milagro_bls = { git = "https://github.com/sigp/milagro_bls", branch = "paulh" } rand = "0.7.2" serde = "1.0.102" serde_derive = "1.0.102" -serde_hex = { path = "../../consensus/serde_hex" } +serde_utils = { path = "../../consensus/serde_utils" } hex = "0.3" eth2_hashing = "0.1.0" ethereum-types = "0.9.1" diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 240b7d1880d..0517512f82a 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -6,7 +6,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::marker::PhantomData; @@ -245,6 +245,23 @@ where impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl fmt::Display for GenericAggregateSignature +where + Sig: TSignature, + AggSig: TAggregateSignature, +{ + impl_display!(); +} + +impl std::str::FromStr + for GenericAggregateSignature +where + Sig: TSignature, + AggSig: TAggregateSignature, +{ + impl_from_str!(); +} + impl Serialize for GenericAggregateSignature where Sig: TSignature, diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 29814d24aca..7b22d272990 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,7 +1,7 @@ use crate::Error; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; 
use std::hash::{Hash, Hasher}; @@ -97,6 +97,14 @@ impl TreeHash for GenericPublicKey { impl_tree_hash!(PUBLIC_KEY_BYTES_LEN); } +impl fmt::Display for GenericPublicKey { + impl_display!(); +} + +impl std::str::FromStr for GenericPublicKey { + impl_from_str!(); +} + impl Serialize for GenericPublicKey { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index beceac1c904..387eb91c969 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -4,7 +4,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; @@ -101,6 +101,16 @@ where Pub: TPublicKey, { fn from(pk: GenericPublicKey) -> Self { + Self::from(&pk) + } +} + +/// Serializes the `PublicKey` in compressed form, storing the bytes in the newly created `Self`. 
+impl From<&GenericPublicKey> for GenericPublicKeyBytes +where + Pub: TPublicKey, +{ + fn from(pk: &GenericPublicKey) -> Self { Self { bytes: pk.serialize(), _phantom: PhantomData, @@ -132,6 +142,14 @@ impl TreeHash for GenericPublicKeyBytes { impl_tree_hash!(PUBLIC_KEY_BYTES_LEN); } +impl fmt::Display for GenericPublicKeyBytes { + impl_display!(); +} + +impl std::str::FromStr for GenericPublicKeyBytes { + impl_from_str!(); +} + impl Serialize for GenericPublicKeyBytes { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 28a9361957a..44250d4a6ba 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -4,7 +4,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::marker::PhantomData; @@ -149,6 +149,14 @@ impl> TreeHash for GenericSignature> fmt::Display for GenericSignature { + impl_display!(); +} + +impl> std::str::FromStr for GenericSignature { + impl_from_str!(); +} + impl> Serialize for GenericSignature { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs index 1f987ecd362..bc7e7f111e8 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -5,7 +5,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; @@ -124,6 +124,14 @@ impl TreeHash for GenericSignatureBytes { impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl fmt::Display for GenericSignatureBytes { + impl_display!(); +} + +impl std::str::FromStr for 
GenericSignatureBytes { + impl_from_str!(); +} + impl Serialize for GenericSignatureBytes { impl_serde_serialize!(); } diff --git a/crypto/bls/src/macros.rs b/crypto/bls/src/macros.rs index ca103da6da4..136faeb4423 100644 --- a/crypto/bls/src/macros.rs +++ b/crypto/bls/src/macros.rs @@ -76,6 +76,35 @@ macro_rules! impl_ssz_decode { }; } +/// Contains the functions required for a `fmt::Display` implementation. +/// +/// Does not include the `Impl` section since it gets very complicated when it comes to generics. +macro_rules! impl_display { + () => { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", hex_encode(self.serialize().to_vec())) + } + }; +} + +/// Contains the functions required for a `fmt::Display` implementation. +/// +/// Does not include the `Impl` section since it gets very complicated when it comes to generics. +macro_rules! impl_from_str { + () => { + type Err = String; + + fn from_str(s: &str) -> Result { + if s.starts_with("0x") { + let bytes = hex::decode(&s[2..]).map_err(|e| e.to_string())?; + Self::deserialize(&bytes[..]).map_err(|e| format!("{:?}", e)) + } else { + Err("must start with 0x".to_string()) + } + } + }; +} + /// Contains the functions required for a `serde::Serialize` implementation. /// /// Does not include the `Impl` section since it gets very complicated when it comes to generics. @@ -85,7 +114,7 @@ macro_rules! impl_serde_serialize { where S: Serializer, { - serializer.serialize_str(&hex_encode(self.serialize().to_vec())) + serializer.serialize_str(&self.to_string()) } }; } @@ -99,9 +128,25 @@ macro_rules! 
impl_serde_deserialize { where D: Deserializer<'de>, { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::deserialize(&bytes[..]) - .map_err(|e| serde::de::Error::custom(format!("invalid pubkey ({:?})", e))) + pub struct StringVisitor; + + impl<'de> serde::de::Visitor<'de> for StringVisitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a hex string with 0x prefix") + } + + fn visit_str(self, value: &str) -> Result + where + E: serde::de::Error, + { + Ok(value.to_string()) + } + } + + let string = deserializer.deserialize_str(StringVisitor)?; + ::from_str(&string).map_err(serde::de::Error::custom) } }; } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 6e89ebf0bf3..150a5a3316e 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -264,61 +264,63 @@ fn run( "name" => testnet_name ); - let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("beacon_node") { - let runtime_context = environment.core_context(); - - let beacon = environment - .runtime() - .block_on(ProductionBeaconNode::new_from_cli( - runtime_context, - sub_matches, - )) - .map_err(|e| format!("Failed to start beacon node: {}", e))?; - - Some(beacon) - } else { - None - }; - - let validator_client = if let Some(sub_matches) = matches.subcommand_matches("validator_client") - { - let runtime_context = environment.core_context(); - - let mut validator = environment - .runtime() - .block_on(ProductionValidatorClient::new_from_cli( - runtime_context, - sub_matches, - )) - .map_err(|e| format!("Failed to init validator client: {}", e))?; - - environment - .core_context() - .executor - .runtime_handle() - .enter(|| { - validator - .start_service() - .map_err(|e| format!("Failed to start validator client service: {}", e)) - })?; - - Some(validator) - } else { - None + match matches.subcommand() { + ("beacon_node", Some(matches)) => { + let context = 
environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = beacon_node::get_config::( + matches, + &context.eth2_config.spec_constants, + &context.eth2_config().spec, + context.log().clone(), + )?; + environment.runtime().spawn(async move { + if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { + crit!(log, "Failed to start beacon node"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start beacon node"); + } + }) + } + ("validator_client", Some(matches)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = validator_client::Config::from_cli(&matches) + .map_err(|e| format!("Unable to initialize validator config: {}", e))?; + environment.runtime().spawn(async move { + let run = async { + ProductionValidatorClient::new(context, config) + .await? + .start_service()?; + + Ok::<(), String>(()) + }; + if let Err(e) = run.await { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start validator client"); + } + }) + } + _ => { + crit!(log, "No subcommand supplied. See --help ."); + return Err("No subcommand supplied.".into()); + } }; - if beacon_node.is_none() && validator_client.is_none() { - crit!(log, "No subcommand supplied. See --help ."); - return Err("No subcommand supplied.".into()); - } - // Block this thread until we get a ctrl-c or a task sends a shutdown signal. environment.block_until_shutdown_requested()?; info!(log, "Shutting down.."); environment.fire_signal(); - drop(beacon_node); - drop(validator_client); // Shutdown the environment once all tasks have completed. 
environment.shutdown_on_idle(); diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index a48f24f3f04..ae23936369e 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -15,6 +15,6 @@ url = "2.1.1" serde = "1.0.110" futures = "0.3.5" genesis = { path = "../../beacon_node/genesis" } -remote_beacon_node = { path = "../../common/remote_beacon_node" } +eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 9459a07b5b5..cc30695a33d 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -4,7 +4,12 @@ use beacon_node::ProductionBeaconNode; use environment::RuntimeContext; +use eth2::{ + reqwest::{ClientBuilder, Url}, + BeaconNodeHttpClient, +}; use std::path::PathBuf; +use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempdir::TempDir; use types::EthSpec; @@ -13,9 +18,12 @@ use validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; pub use environment; -pub use remote_beacon_node::RemoteBeaconNode; +pub use eth2; pub use validator_client::Config as ValidatorConfig; +/// The global timeout for HTTP requests to the beacon node. +const HTTP_TIMEOUT: Duration = Duration::from_secs(4); + /// Provides a beacon node that is running in the current process on a given tokio executor (it /// is _local_ to this process). /// @@ -52,16 +60,23 @@ impl LocalBeaconNode { impl LocalBeaconNode { /// Returns a `RemoteBeaconNode` that can connect to `self`. Useful for testing the node as if /// it were external this process. 
- pub fn remote_node(&self) -> Result, String> { - let socket_addr = self + pub fn remote_node(&self) -> Result { + let listen_addr = self .client - .http_listen_addr() + .http_api_listen_addr() .ok_or_else(|| "A remote beacon node must have a http server".to_string())?; - Ok(RemoteBeaconNode::new(format!( - "http://{}:{}", - socket_addr.ip(), - socket_addr.port() - ))?) + + let beacon_node_url: Url = format!("http://{}:{}", listen_addr.ip(), listen_addr.port()) + .parse() + .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; + let beacon_node_http_client = ClientBuilder::new() + .timeout(HTTP_TIMEOUT) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; + Ok(BeaconNodeHttpClient::from_components( + beacon_node_url, + beacon_node_http_client, + )) } } @@ -71,8 +86,8 @@ pub fn testing_client_config() -> ClientConfig { // Setting ports to `0` means that the OS will choose some available port. client_config.network.libp2p_port = 0; client_config.network.discovery_port = 0; - client_config.rest_api.enabled = true; - client_config.rest_api.port = 0; + client_config.http_api.enabled = true; + client_config.http_api.listen_port = 0; client_config.websocket_server.enabled = true; client_config.websocket_server.port = 0; diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 43ceaa14fdb..e755c9005fa 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,4 +1,5 @@ use crate::local_network::LocalNetwork; +use node_test_rig::eth2::types::StateId; use std::time::Duration; use types::{Epoch, EthSpec, Slot, Unsigned}; @@ -65,11 +66,9 @@ pub async fn verify_all_finalized_at( for remote_node in network.remote_nodes()? 
{ epochs.push( remote_node - .http - .beacon() - .get_head() + .get_beacon_states_finality_checkpoints(StateId::Head) .await - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|body| body.unwrap().data.finalized.epoch) .map_err(|e| format!("Get head via http failed: {:?}", e))?, ); } @@ -95,17 +94,10 @@ async fn verify_validator_count( let validator_counts = { let mut validator_counts = Vec::new(); for remote_node in network.remote_nodes()? { - let beacon = remote_node.http.beacon(); - - let head = beacon - .get_head() - .await - .map_err(|e| format!("Get head via http failed: {:?}", e))?; - - let vc = beacon - .get_state_by_root(head.state_root) + let vc = remote_node + .get_debug_beacon_states::(StateId::Head) .await - .map(|(state, _root)| state) + .map(|body| body.unwrap().data) .map_err(|e| format!("Get state root via http failed: {:?}", e))? .validators .len(); diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index de78aaa0578..1ce8b2a5d9b 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -34,7 +34,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speed_up_factor") .takes_value(true) - .default_value("4") + .default_value("3") .help("Speed up factor")) .arg(Arg::with_name("continue_after_checks") .short("c") @@ -62,7 +62,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speed_up_factor") .takes_value(true) - .default_value("4") + .default_value("3") .help("Speed up factor")) .arg(Arg::with_name("continue_after_checks") .short("c") diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 37ce3ab5664..0dd9b3424b8 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,6 +1,7 @@ use node_test_rig::{ - environment::RuntimeContext, ClientConfig, LocalBeaconNode, LocalValidatorClient, - RemoteBeaconNode, ValidatorConfig, ValidatorFiles, + environment::RuntimeContext, + 
eth2::{types::StateId, BeaconNodeHttpClient}, + ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use std::ops::Deref; @@ -123,7 +124,7 @@ impl LocalNetwork { .ok_or_else(|| format!("No beacon node for index {}", beacon_node))?; beacon_node .client - .http_listen_addr() + .http_api_listen_addr() .expect("Must have http started") }; @@ -140,7 +141,7 @@ impl LocalNetwork { } /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. - pub fn remote_nodes(&self) -> Result>, String> { + pub fn remote_nodes(&self) -> Result, String> { let beacon_nodes = self.beacon_nodes.read(); beacon_nodes @@ -154,11 +155,9 @@ impl LocalNetwork { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); bootnode - .http - .beacon() - .get_head() + .get_beacon_states_finality_checkpoints(StateId::Head) .await .map_err(|e| format!("Cannot get head: {:?}", e)) - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|body| body.unwrap().data.finalized.epoch) } } diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 7583a6eab20..47272f62681 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -350,11 +350,9 @@ pub async fn check_still_syncing(network: &LocalNetwork) -> Resul for remote_node in network.remote_nodes()? 
{ status.push( remote_node - .http - .node() - .syncing_status() + .get_node_syncing() .await - .map(|status| status.is_syncing) + .map(|body| body.data.is_syncing) .map_err(|e| format!("Get syncing status via http failed: {:?}", e))?, ) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 0f79c2313fb..0f29f41e070 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -19,7 +19,6 @@ clap = "2.33.0" eth2_interop_keypairs = { path = "../common/eth2_interop_keypairs" } slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } -rest_types = { path = "../common/rest_types" } types = { path = "../consensus/types" } serde = "1.0.110" serde_derive = "1.0.110" @@ -40,7 +39,7 @@ eth2_ssz_derive = "0.1.0" hex = "0.4.2" deposit_contract = { path = "../common/deposit_contract" } bls = { path = "../crypto/bls" } -remote_beacon_node = { path = "../common/remote_beacon_node" } +eth2 = { path = "../common/eth2" } tempdir = "0.3.7" rayon = "1.3.0" validator_dir = { path = "../common/validator_dir" } diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index fa79877774b..d675ebda2e8 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -3,22 +3,26 @@ use crate::{ validator_store::ValidatorStore, }; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use futures::StreamExt; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use slog::{crit, debug, error, info, trace}; +use slog::{crit, error, info, trace}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use tokio::time::{delay_until, interval_at, Duration, Instant}; -use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot, SubnetId}; +use tree_hash::TreeHash; +use types::{ + AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, 
CommitteeIndex, EthSpec, + Slot, +}; /// Builds an `AttestationService`. pub struct AttestationServiceBuilder { duties_service: Option>, validator_store: Option>, slot_clock: Option, - beacon_node: Option>, + beacon_node: Option, context: Option>, } @@ -48,7 +52,7 @@ impl AttestationServiceBuilder { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -86,7 +90,7 @@ pub struct Inner { duties_service: DutiesService, validator_store: ValidatorStore, slot_clock: T, - beacon_node: RemoteBeaconNode, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext, } @@ -262,7 +266,7 @@ impl AttestationService { // Step 2. // // If an attestation was produced, make an aggregate. - if let Some(attestation) = attestation_opt { + if let Some(attestation_data) = attestation_opt { // First, wait until the `aggregation_production_instant` (2/3rds // of the way though the slot). As verified in the // `delay_triggers_when_in_the_past` test, this code will still run @@ -272,7 +276,7 @@ impl AttestationService { // Then download, sign and publish a `SignedAggregateAndProof` for each // validator that is elected to aggregate for this `slot` and // `committee_index`. - self.produce_and_publish_aggregates(attestation, &validator_duties) + self.produce_and_publish_aggregates(attestation_data, &validator_duties) .await .map_err(move |e| { crit!( @@ -305,7 +309,7 @@ impl AttestationService { slot: Slot, committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], - ) -> Result>, String> { + ) -> Result, String> { let log = self.context.log(); if validator_duties.is_empty() { @@ -318,124 +322,88 @@ impl AttestationService { .ok_or_else(|| "Unable to determine current slot from clock".to_string())? 
.epoch(E::slots_per_epoch()); - let attestation = self + let attestation_data = self .beacon_node - .http - .validator() - .produce_attestation(slot, committee_index) + .get_validator_attestation_data(slot, committee_index) .await - .map_err(|e| format!("Failed to produce attestation: {:?}", e))?; + .map_err(|e| format!("Failed to produce attestation data: {:?}", e))? + .data; + + for duty in validator_duties { + // Ensure that all required fields are present in the validator duty. + let ( + duty_slot, + duty_committee_index, + validator_committee_position, + _, + _, + committee_length, + ) = if let Some(tuple) = duty.attestation_duties() { + tuple + } else { + crit!( + log, + "Missing validator duties when signing"; + "duties" => format!("{:?}", duty) + ); + continue; + }; - // For each validator in `validator_duties`, clone the `attestation` and add - // their signature. - // - // If any validator is unable to sign, they are simply skipped. - let signed_attestations = validator_duties - .iter() - .filter_map(|duty| { - // Ensure that all required fields are present in the validator duty. - let ( - duty_slot, - duty_committee_index, - validator_committee_position, - _, - committee_count_at_slot, - ) = if let Some(tuple) = duty.attestation_duties() { - tuple - } else { - crit!( - log, - "Missing validator duties when signing"; - "duties" => format!("{:?}", duty) - ); - return None; - }; + // Ensure that the attestation matches the duties. + if duty_slot != attestation_data.slot || duty_committee_index != attestation_data.index + { + crit!( + log, + "Inconsistent validator duties during signing"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "duty_slot" => duty_slot, + "attestation_slot" => attestation_data.slot, + "duty_index" => duty_committee_index, + "attestation_index" => attestation_data.index, + ); + continue; + } - // Ensure that the attestation matches the duties. 
- if duty_slot != attestation.data.slot - || duty_committee_index != attestation.data.index - { - crit!( - log, - "Inconsistent validator duties during signing"; - "validator" => format!("{:?}", duty.validator_pubkey()), - "duty_slot" => duty_slot, - "attestation_slot" => attestation.data.slot, - "duty_index" => duty_committee_index, - "attestation_index" => attestation.data.index, - ); - return None; - } + let mut attestation = Attestation { + aggregation_bits: BitList::with_capacity(committee_length as usize).unwrap(), + data: attestation_data.clone(), + signature: AggregateSignature::infinity(), + }; - let mut attestation = attestation.clone(); - let subnet_id = SubnetId::compute_subnet_for_attestation_data::( - &attestation.data, - committee_count_at_slot, - &self.context.eth2_config().spec, + self.validator_store + .sign_attestation( + duty.validator_pubkey(), + validator_committee_position, + &mut attestation, + current_epoch, ) - .map_err(|e| { - error!( - log, - "Failed to compute subnet id to publish attestation: {:?}", e - ) - }) - .ok()?; - self.validator_store - .sign_attestation( - duty.validator_pubkey(), - validator_committee_position, - &mut attestation, - current_epoch, - ) - .map(|_| (attestation, subnet_id)) - }) - .collect::>(); - - // If there are any signed attestations, publish them to the BN. Otherwise, - // just return early. 
- if let Some(attestation) = signed_attestations.first().cloned() { - let num_attestations = signed_attestations.len(); - let beacon_block_root = attestation.0.data.beacon_block_root; - - self.beacon_node - .http - .validator() - .publish_attestations(signed_attestations) + .ok_or_else(|| "Failed to sign attestation".to_string())?; + + match self + .beacon_node + .post_beacon_pool_attestations(&attestation) .await - .map_err(|e| format!("Failed to publish attestation: {:?}", e)) - .map(move |publish_status| match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published attestations"; - "count" => num_attestations, - "head_block" => format!("{:?}", beacon_block_root), - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published attestation was invalid"; - "message" => msg, - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Unknown => { - crit!(log, "Unknown condition when publishing unagg. attestation") - } - }) - .map(|()| Some(attestation.0)) - } else { - debug!( - log, - "No attestations to publish"; - "committee_index" => committee_index, - "slot" => slot.as_u64(), - ); - - Ok(None) + { + Ok(()) => info!( + log, + "Successfully published attestation"; + "head_block" => format!("{:?}", attestation.data.beacon_block_root), + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + "type" => "unaggregated", + ), + Err(e) => error!( + log, + "Unable to publish attestation"; + "error" => e.to_string(), + "committee_index" => attestation.data.index, + "slot" => slot.as_u64(), + "type" => "unaggregated", + ), + } } + + Ok(Some(attestation_data)) } /// Performs the second step of the attesting process: downloading an aggregated `Attestation`, @@ -453,103 +421,89 @@ impl AttestationService { /// returned to the BN. 
async fn produce_and_publish_aggregates( &self, - attestation: Attestation, + attestation_data: AttestationData, validator_duties: &[DutyAndProof], ) -> Result<(), String> { let log = self.context.log(); let aggregated_attestation = self .beacon_node - .http - .validator() - .produce_aggregate_attestation(&attestation.data) + .get_validator_aggregate_attestation( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) .await - .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))?; - - // For each validator, clone the `aggregated_attestation` and convert it into - // a `SignedAggregateAndProof` - let signed_aggregate_and_proofs = validator_duties - .iter() - .filter_map(|duty_and_proof| { - // Do not produce a signed aggregator for validators that are not + .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))? + .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))? + .data; + + for duty_and_proof in validator_duties { + let selection_proof = if let Some(proof) = duty_and_proof.selection_proof.as_ref() { + proof + } else { + // Do not produce a signed aggregate for validators that are not // subscribed aggregators. 
- let selection_proof = duty_and_proof.selection_proof.as_ref()?.clone(); - - let (duty_slot, duty_committee_index, _, validator_index, _) = - duty_and_proof.attestation_duties().or_else(|| { - crit!(log, "Missing duties when signing aggregate"); - None - })?; + continue; + }; + let (duty_slot, duty_committee_index, _, validator_index, _, _) = + if let Some(tuple) = duty_and_proof.attestation_duties() { + tuple + } else { + crit!(log, "Missing duties when signing aggregate"); + continue; + }; - let pubkey = &duty_and_proof.duty.validator_pubkey; - let slot = attestation.data.slot; - let committee_index = attestation.data.index; + let pubkey = &duty_and_proof.duty.validator_pubkey; + let slot = attestation_data.slot; + let committee_index = attestation_data.index; - if duty_slot != slot || duty_committee_index != committee_index { - crit!(log, "Inconsistent validator duties during signing"); - return None; - } + if duty_slot != slot || duty_committee_index != committee_index { + crit!(log, "Inconsistent validator duties during signing"); + continue; + } - if let Some(signed_aggregate_and_proof) = - self.validator_store.produce_signed_aggregate_and_proof( - pubkey, - validator_index, - aggregated_attestation.clone(), - selection_proof, - ) - { - Some(signed_aggregate_and_proof) - } else { - crit!(log, "Failed to sign attestation"); - None - } - }) - .collect::>(); + let signed_aggregate_and_proof = if let Some(aggregate) = + self.validator_store.produce_signed_aggregate_and_proof( + pubkey, + validator_index, + aggregated_attestation.clone(), + selection_proof.clone(), + ) { + aggregate + } else { + crit!(log, "Failed to sign attestation"); + continue; + }; - // If there any signed aggregates and proofs were produced, publish them to the - // BN. 
- if let Some(first) = signed_aggregate_and_proofs.first().cloned() { - let attestation = first.message.aggregate; + let attestation = &signed_aggregate_and_proof.message.aggregate; - let publish_status = self + match self .beacon_node - .http - .validator() - .publish_aggregate_and_proof(signed_aggregate_and_proofs) + .post_validator_aggregate_and_proof(&signed_aggregate_and_proof) .await - .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e))?; - match publish_status { - PublishStatus::Valid => info!( + { + Ok(()) => info!( log, - "Successfully published attestations"; + "Successfully published attestation"; + "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "signatures" => attestation.aggregation_bits.num_set_bits(), "head_block" => format!("{:?}", attestation.data.beacon_block_root), "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", ), - PublishStatus::Invalid(msg) => crit!( + Err(e) => crit!( log, - "Published attestation was invalid"; - "message" => msg, + "Failed to publish attestation"; + "error" => e.to_string(), "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", ), - PublishStatus::Unknown => { - crit!(log, "Unknown condition when publishing agg. 
attestation") - } - }; - Ok(()) - } else { - debug!( - log, - "No signed aggregates to publish"; - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), - ); - Ok(()) + } } + + Ok(()) } } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 60d1f4d5514..bf52cacfc0b 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,19 +1,19 @@ use crate::validator_store::ValidatorStore; use environment::RuntimeContext; +use eth2::{types::Graffiti, BeaconNodeHttpClient}; use futures::channel::mpsc::Receiver; use futures::{StreamExt, TryFutureExt}; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use types::{EthSpec, Graffiti, PublicKey, Slot}; +use types::{EthSpec, PublicKey, Slot}; /// Builds a `BlockService`. pub struct BlockServiceBuilder { validator_store: Option>, slot_clock: Option>, - beacon_node: Option>, + beacon_node: Option, context: Option>, graffiti: Option, } @@ -39,7 +39,7 @@ impl BlockServiceBuilder { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -79,7 +79,7 @@ impl BlockServiceBuilder { pub struct Inner { validator_store: ValidatorStore, slot_clock: Arc, - beacon_node: RemoteBeaconNode, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext, graffiti: Option, } @@ -221,41 +221,28 @@ impl BlockService { let block = self .beacon_node - .http - .validator() - .produce_block(slot, randao_reveal, self.graffiti) + .get_validator_blocks(slot, randao_reveal.into(), self.graffiti.as_ref()) .await - .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))?; + .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))? 
+ .data; let signed_block = self .validator_store .sign_block(&validator_pubkey, block, current_slot) .ok_or_else(|| "Unable to sign block".to_string())?; - let publish_status = self - .beacon_node - .http - .validator() - .publish_block(signed_block.clone()) + self.beacon_node + .post_beacon_blocks(&signed_block) .await .map_err(|e| format!("Error from beacon node when publishing block: {:?}", e))?; - match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published block"; - "deposits" => signed_block.message.body.deposits.len(), - "attestations" => signed_block.message.body.attestations.len(), - "slot" => signed_block.slot().as_u64(), - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published block was invalid"; - "message" => msg, - "slot" => signed_block.slot().as_u64(), - ), - PublishStatus::Unknown => crit!(log, "Unknown condition when publishing block"), - } + info!( + log, + "Successfully published block"; + "deposits" => signed_block.message.body.deposits.len(), + "attestations" => signed_block.message.body.attestations.len(), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ed320c24cde..7ac483439ce 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -37,11 +37,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { nodes using the same key. Automatically enabled unless `--strict` is specified", )) .arg( - Arg::with_name("strict-lockfiles") - .long("strict-lockfiles") + Arg::with_name("delete-lockfiles") + .long("delete-lockfiles") .help( - "If present, do not load validators that are guarded by a lockfile. Note: for \ - Eth2 mainnet, this flag will likely be removed and its behaviour will become default." + "If present, ignore and delete any keystore lockfiles encountered during start up. \ + This is useful if the validator client did not exit gracefully on the last run. 
\ + WARNING: lockfiles help prevent users from accidentally running the same validator \ + using two different validator clients, an action that likely leads to slashing. \ + Ensure you are certain that there are no other validator client instances running \ + that might also be using the same keystores." ) ) .arg( diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 482c4ed7007..3a9366463af 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,8 +1,9 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_path_with_default_in_home_dir}; +use eth2::types::Graffiti; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; -use types::{Graffiti, GRAFFITI_BYTES_LEN}; +use types::GRAFFITI_BYTES_LEN; pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/"; pub const DEFAULT_DATA_DIR: &str = ".lighthouse/validators"; @@ -24,8 +25,8 @@ pub struct Config { /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, - /// If true, refuse to unlock a keypair that is guarded by a lockfile. - pub strict_lockfiles: bool, + /// If true, delete any validator keystore lockfiles that would prevent starting. + pub delete_lockfiles: bool, /// If true, don't scan the validators dir for new keystores. pub disable_auto_discover: bool, /// Graffiti to be inserted everytime we create a block. 
@@ -46,7 +47,7 @@ impl Default for Config { secrets_dir, http_server: DEFAULT_HTTP_SERVER.to_string(), allow_unsynced_beacon_node: false, - strict_lockfiles: false, + delete_lockfiles: false, disable_auto_discover: false, graffiti: None, } @@ -77,7 +78,7 @@ impl Config { } config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); - config.strict_lockfiles = cli_args.is_present("strict-lockfiles"); + config.delete_lockfiles = cli_args.is_present("delete-lockfiles"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); if let Some(secrets_dir) = parse_optional(cli_args, "secrets-dir")? { @@ -92,15 +93,14 @@ impl Config { GRAFFITI_BYTES_LEN )); } else { - // Default graffiti to all 0 bytes. - let mut graffiti = Graffiti::default(); + let mut graffiti = [0; 32]; // Copy the provided bytes over. // // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`. graffiti[..graffiti_bytes.len()].copy_from_slice(&graffiti_bytes); - config.graffiti = Some(graffiti); + config.graffiti = Some(graffiti.into()); } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 7375d550235..cea9451e77a 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -1,16 +1,15 @@ use crate::{ - block_service::BlockServiceNotification, is_synced::is_synced, validator_store::ValidatorStore, + block_service::BlockServiceNotification, is_synced::is_synced, validator_duty::ValidatorDuty, + validator_store::ValidatorStore, }; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use futures::channel::mpsc::Sender; use futures::{SinkExt, StreamExt}; use parking_lot::RwLock; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription}; use slog::{debug, error, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; -use std::convert::TryInto; use std::ops::Deref; use 
std::sync::Arc; use tokio::time::{interval_at, Duration, Instant}; @@ -44,14 +43,14 @@ impl DutyAndProof { pub fn compute_selection_proof( &mut self, validator_store: &ValidatorStore, + spec: &ChainSpec, ) -> Result<(), String> { - let (modulo, slot) = if let (Some(modulo), Some(slot)) = - (self.duty.aggregator_modulo, self.duty.attestation_slot) + let (committee_length, slot) = if let (Some(count), Some(slot)) = + (self.duty.committee_length, self.duty.attestation_slot) { - (modulo, slot) + (count as usize, slot) } else { - // If there is no modulo or for the aggregator we assume they are not activated and - // therefore not an aggregator. + // If there are no attester duties we assume the validator is inactive. self.selection_proof = None; return Ok(()); }; @@ -61,7 +60,7 @@ impl DutyAndProof { .ok_or_else(|| "Failed to produce selection proof".to_string())?; self.selection_proof = selection_proof - .is_aggregator_from_modulo(modulo) + .is_aggregator(committee_length, spec) .map_err(|e| format!("Invalid modulo: {:?}", e)) .map(|is_aggregator| { if is_aggregator { @@ -87,19 +86,20 @@ impl DutyAndProof { /// It's important to note that this doesn't actually check `self.selection_proof`, instead it /// checks to see if the inputs to computing the selection proof are equal. fn selection_proof_eq(&self, other: &Self) -> bool { - self.duty.aggregator_modulo == other.duty.aggregator_modulo + self.duty.committee_count_at_slot == other.duty.committee_count_at_slot && self.duty.attestation_slot == other.duty.attestation_slot } /// Returns the information required for an attesting validator, if they are scheduled to /// attest. 
- pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64)> { + pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64, u64)> { Some(( self.duty.attestation_slot?, self.duty.attestation_committee_index?, self.duty.attestation_committee_position?, self.duty.validator_index?, self.duty.committee_count_at_slot?, + self.duty.committee_length?, )) } @@ -108,26 +108,12 @@ impl DutyAndProof { } } -impl TryInto for ValidatorDutyBytes { - type Error = String; - - fn try_into(self) -> Result { - let duty = ValidatorDuty { - validator_pubkey: (&self.validator_pubkey) - .try_into() - .map_err(|e| format!("Invalid pubkey bytes from server: {:?}", e))?, - validator_index: self.validator_index, - attestation_slot: self.attestation_slot, - attestation_committee_index: self.attestation_committee_index, - attestation_committee_position: self.attestation_committee_position, - committee_count_at_slot: self.committee_count_at_slot, - block_proposal_slots: self.block_proposal_slots, - aggregator_modulo: self.aggregator_modulo, - }; - Ok(DutyAndProof { - duty, +impl Into for ValidatorDuty { + fn into(self) -> DutyAndProof { + DutyAndProof { + duty: self, selection_proof: None, - }) + } } } @@ -260,6 +246,7 @@ impl DutiesStore { mut duties: DutyAndProof, slots_per_epoch: u64, validator_store: &ValidatorStore, + spec: &ChainSpec, ) -> Result { let mut store = self.store.write(); @@ -282,7 +269,7 @@ impl DutiesStore { } } else { // Compute the selection proof. - duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; // Determine if a re-subscription is required. let should_resubscribe = !duties.subscription_eq(known_duties); @@ -294,7 +281,7 @@ impl DutiesStore { } } else { // Compute the selection proof. 
- duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; validator_map.insert(epoch, duties); @@ -302,7 +289,7 @@ impl DutiesStore { } } else { // Compute the selection proof. - duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; let validator_pubkey = duties.duty.validator_pubkey.clone(); @@ -328,7 +315,7 @@ impl DutiesStore { pub struct DutiesServiceBuilder { validator_store: Option>, slot_clock: Option, - beacon_node: Option>, + beacon_node: Option, context: Option>, allow_unsynced_beacon_node: bool, } @@ -354,7 +341,7 @@ impl DutiesServiceBuilder { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -397,7 +384,7 @@ pub struct Inner { store: Arc, validator_store: ValidatorStore, pub(crate) slot_clock: T, - pub(crate) beacon_node: RemoteBeaconNode, + pub(crate) beacon_node: BeaconNodeHttpClient, context: RuntimeContext, /// If true, the duties service will poll for duties from the beacon node even if it is not /// synced. @@ -462,7 +449,7 @@ impl DutiesService { pub fn start_update_service( self, mut block_service_tx: Sender, - spec: &ChainSpec, + spec: Arc, ) -> Result<(), String> { let duration_to_next_slot = self .slot_clock @@ -481,17 +468,22 @@ impl DutiesService { // Run an immediate update before starting the updater service. 
let duties_service = self.clone(); let mut block_service_tx_clone = block_service_tx.clone(); + let inner_spec = spec.clone(); self.inner .context .executor .runtime_handle() - .spawn(async move { duties_service.do_update(&mut block_service_tx_clone).await }); + .spawn(async move { + duties_service + .do_update(&mut block_service_tx_clone, &inner_spec) + .await + }); let executor = self.inner.context.executor.clone(); let interval_fut = async move { while interval.next().await.is_some() { - self.clone().do_update(&mut block_service_tx).await; + self.clone().do_update(&mut block_service_tx, &spec).await; } }; @@ -501,7 +493,11 @@ impl DutiesService { } /// Attempt to download the duties of all managed validators for this epoch and the next. - async fn do_update(self, block_service_tx: &mut Sender) { + async fn do_update( + self, + block_service_tx: &mut Sender, + spec: &ChainSpec, + ) { let log = self.context.log(); if !is_synced(&self.beacon_node, &self.slot_clock, None).await @@ -534,7 +530,11 @@ impl DutiesService { // Update duties for the current epoch, but keep running if there's an error: // block production or the next epoch update could still succeed. - if let Err(e) = self.clone().update_epoch(current_epoch).await { + if let Err(e) = self + .clone() + .update_epoch(current_epoch, current_epoch, spec) + .await + { error!( log, "Failed to get current epoch duties"; @@ -558,7 +558,11 @@ impl DutiesService { }; // Update duties for the next epoch. - if let Err(e) = self.clone().update_epoch(current_epoch + 1).await { + if let Err(e) = self + .clone() + .update_epoch(current_epoch, current_epoch + 1, spec) + .await + { error!( log, "Failed to get next epoch duties"; @@ -568,17 +572,13 @@ impl DutiesService { } /// Attempt to download the duties of all managed validators for the given `epoch`. 
- async fn update_epoch(self, epoch: Epoch) -> Result<(), String> { - let pubkeys = self.validator_store.voting_pubkeys(); - let all_duties = self - .beacon_node - .http - .validator() - .get_duties(epoch, pubkeys.as_slice()) - .await - .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?; - - let log = self.context.log().clone(); + async fn update_epoch( + self, + current_epoch: Epoch, + request_epoch: Epoch, + spec: &ChainSpec, + ) -> Result<(), String> { + let log = self.context.log(); let mut new_validator = 0; let mut new_epoch = 0; @@ -587,74 +587,60 @@ impl DutiesService { let mut replaced = 0; let mut invalid = 0; - // For each of the duties, attempt to insert them into our local store and build a - // list of new or changed selections proofs for any aggregating validators. - let validator_subscriptions = all_duties - .into_iter() - .filter_map(|remote_duties| { - // Convert the remote duties into our local representation. - let duties: DutyAndProof = remote_duties - .clone() - .try_into() - .map_err(|e| { - error!( - log, - "Unable to convert remote duties"; - "error" => e - ) - }) - .ok()?; - - let validator_pubkey = duties.duty.validator_pubkey.clone(); - - // Attempt to update our local store. 
- let outcome = self - .store - .insert(epoch, duties, E::slots_per_epoch(), &self.validator_store) - .map_err(|e| { - error!( - log, - "Unable to store duties"; - "error" => e - ) - }) - .ok()?; - - match &outcome { - InsertOutcome::NewValidator => { - debug!( - log, - "First duty assignment for validator"; - "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), - "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), - "validator" => format!("{:?}", &remote_duties.validator_pubkey) - ); - new_validator += 1; + let mut validator_subscriptions = vec![]; + for pubkey in self.validator_store.voting_pubkeys() { + let remote_duties = + ValidatorDuty::download(&self.beacon_node, current_epoch, request_epoch, pubkey) + .await?; + // Convert the remote duties into our local representation. + let duties: DutyAndProof = remote_duties.clone().into(); + + let validator_pubkey = duties.duty.validator_pubkey.clone(); + + // Attempt to update our local store. + match self.store.insert( + request_epoch, + duties, + E::slots_per_epoch(), + &self.validator_store, + spec, + ) { + Ok(outcome) => { + match &outcome { + InsertOutcome::NewValidator => { + debug!( + log, + "First duty assignment for validator"; + "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), + "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), + "validator" => format!("{:?}", &remote_duties.validator_pubkey) + ); + new_validator += 1; + } + InsertOutcome::NewProposalSlots => new_proposal_slots += 1, + InsertOutcome::NewEpoch => new_epoch += 1, + InsertOutcome::Identical => identical += 1, + InsertOutcome::Replaced { .. 
} => replaced += 1, + InsertOutcome::Invalid => invalid += 1, + } + + if let Some(is_aggregator) = + self.store.is_aggregator(&validator_pubkey, request_epoch) + { + if outcome.is_subscription_candidate() { + if let Some(subscription) = remote_duties.subscription(is_aggregator) { + validator_subscriptions.push(subscription) + } + } } - InsertOutcome::NewProposalSlots => new_proposal_slots += 1, - InsertOutcome::NewEpoch => new_epoch += 1, - InsertOutcome::Identical => identical += 1, - InsertOutcome::Replaced { .. } => replaced += 1, - InsertOutcome::Invalid => invalid += 1, - }; - - // The selection proof is computed on `store.insert`, so it's necessary to check - // with the store that the validator is an aggregator. - let is_aggregator = self.store.is_aggregator(&validator_pubkey, epoch)?; - - if outcome.is_subscription_candidate() { - Some(ValidatorSubscription { - validator_index: remote_duties.validator_index?, - attestation_committee_index: remote_duties.attestation_committee_index?, - slot: remote_duties.attestation_slot?, - committee_count_at_slot: remote_duties.committee_count_at_slot?, - is_aggregator, - }) - } else { - None } - }) - .collect::>(); + Err(e) => error!( + log, + "Unable to store duties"; + "error" => e + ), + } + } if invalid > 0 { error!( @@ -673,7 +659,7 @@ impl DutiesService { "new_proposal_slots" => new_proposal_slots, "new_validator" => new_validator, "replaced" => replaced, - "epoch" => format!("{}", epoch) + "epoch" => format!("{}", request_epoch) ); if replaced > 0 { @@ -690,34 +676,19 @@ impl DutiesService { if count == 0 { debug!(log, "No new subscriptions required"); - - Ok(()) } else { self.beacon_node - .http - .validator() - .subscribe(validator_subscriptions) + .post_validator_beacon_committee_subscriptions(&validator_subscriptions) .await - .map_err(|e| format!("Failed to subscribe validators: {:?}", e)) - .map(move |status| { - match status { - PublishStatus::Valid => debug!( - log, - "Successfully subscribed validators"; 
- "count" => count - ), - PublishStatus::Unknown => error!( - log, - "Unknown response from subscription"; - ), - PublishStatus::Invalid(e) => error!( - log, - "Failed to subscribe validator"; - "error" => e - ), - }; - }) + .map_err(|e| format!("Failed to subscribe validators: {:?}", e))?; + debug!( + log, + "Successfully subscribed validators"; + "count" => count + ); } + + Ok(()) } } diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index b8db7b72e3c..e38a4cf3c1b 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -1,7 +1,7 @@ use environment::RuntimeContext; +use eth2::{types::StateId, BeaconNodeHttpClient}; use futures::StreamExt; use parking_lot::RwLock; -use remote_beacon_node::RemoteBeaconNode; use slog::{debug, trace}; use slot_clock::SlotClock; use std::ops::Deref; @@ -16,7 +16,7 @@ const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80); pub struct ForkServiceBuilder { fork: Option, slot_clock: Option, - beacon_node: Option>, + beacon_node: Option, context: Option>, } @@ -35,7 +35,7 @@ impl ForkServiceBuilder { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -66,7 +66,7 @@ impl ForkServiceBuilder { /// Helper to minimise `Arc` usage. pub struct Inner { fork: RwLock>, - beacon_node: RemoteBeaconNode, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext, slot_clock: T, } @@ -141,9 +141,7 @@ impl ForkService { let fork = self .inner .beacon_node - .http - .beacon() - .get_fork() + .get_beacon_states_fork(StateId::Head) .await .map_err(|e| { trace!( @@ -151,7 +149,15 @@ impl ForkService { "Fork update failed"; "error" => format!("Error retrieving fork: {:?}", e) ) - })?; + })? + .ok_or_else(|| { + trace!( + log, + "Fork update failed"; + "error" => "The beacon head fork is unknown" + ) + })? 
+ .data; if self.fork.read().as_ref() != Some(&fork) { *(self.fork.write()) = Some(fork); diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 436dcb4bae3..c100ba10e38 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -50,10 +50,12 @@ pub enum Error { UnableToSaveDefinitions(validator_definitions::Error), /// It is not legal to try and initialize a disabled validator definition. UnableToInitializeDisabledValidator, - /// It is not legal to try and initialize a disabled validator definition. - PasswordUnknown(PathBuf), /// There was an error reading from stdin. UnableToReadPasswordFromUser(String), + /// There was an error running a tokio async task. + TokioJoin(tokio::task::JoinError), + /// There was a filesystem error when deleting a lockfile. + UnableToDeleteLockfile(io::Error), } /// A method used by a validator to sign messages. @@ -86,7 +88,7 @@ impl InitializedValidator { /// If the validator is unable to be initialized for whatever reason. pub fn from_definition( def: ValidatorDefinition, - strict_lockfiles: bool, + delete_lockfiles: bool, log: &Logger, ) -> Result { if !def.enabled { @@ -150,16 +152,17 @@ impl InitializedValidator { })?; if voting_keystore_lockfile_path.exists() { - if strict_lockfiles { - return Err(Error::LockfileExists(voting_keystore_lockfile_path)); - } else { - // If **not** respecting lockfiles, just raise a warning if the voting - // keypair cannot be unlocked. + if delete_lockfiles { warn!( log, - "Ignoring validator lockfile"; + "Deleting validator lockfile"; "file" => format!("{:?}", voting_keystore_lockfile_path) ); + + fs::remove_file(&voting_keystore_lockfile_path) + .map_err(Error::UnableToDeleteLockfile)?; + } else { + return Err(Error::LockfileExists(voting_keystore_lockfile_path)); } } else { // Create a new lockfile. 
@@ -279,7 +282,7 @@ pub struct InitializedValidators { impl InitializedValidators { /// Instantiates `Self`, initializing all validators in `definitions`. - pub fn from_definitions( + pub async fn from_definitions( definitions: ValidatorDefinitions, validators_dir: PathBuf, strict_lockfiles: bool, @@ -292,7 +295,7 @@ impl InitializedValidators { validators: HashMap::default(), log, }; - this.update_validators()?; + this.update_validators().await?; Ok(this) } @@ -328,7 +331,8 @@ impl InitializedValidators { /// validator will be removed from `self.validators`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. - pub fn set_validator_status( + #[allow(dead_code)] // Will be used once HTTP API is enabled. + pub async fn set_validator_status( &mut self, voting_public_key: &PublicKey, enabled: bool, @@ -342,7 +346,7 @@ impl InitializedValidators { def.enabled = enabled; } - self.update_validators()?; + self.update_validators().await?; self.definitions .save(&self.validators_dir) @@ -362,7 +366,7 @@ impl InitializedValidators { /// A validator is considered "already known" and skipped if the public key is already known. /// I.e., if there are two different definitions with the same public key then the second will /// be ignored. - fn update_validators(&mut self) -> Result<(), Error> { + async fn update_validators(&mut self) -> Result<(), Error> { for def in self.definitions.as_slice() { if def.enabled { match &def.signing_definition { @@ -371,11 +375,23 @@ impl InitializedValidators { continue; } - match InitializedValidator::from_definition( - def.clone(), - self.strict_lockfiles, - &self.log, - ) { + // Decoding a local keystore can take several seconds, therefore it's best + // to keep if off the core executor. This also has the fortunate effect of + // interrupting the potentially long-running task during shut down. 
+ let inner_def = def.clone(); + let strict_lockfiles = self.strict_lockfiles; + let inner_log = self.log.clone(); + let result = tokio::task::spawn_blocking(move || { + InitializedValidator::from_definition( + inner_def, + strict_lockfiles, + &inner_log, + ) + }) + .await + .map_err(Error::TokioJoin)?; + + match result { Ok(init) => { self.validators .insert(init.voting_public_key().clone(), init); diff --git a/validator_client/src/is_synced.rs b/validator_client/src/is_synced.rs index e1017ac7719..f967d629c10 100644 --- a/validator_client/src/is_synced.rs +++ b/validator_client/src/is_synced.rs @@ -1,8 +1,6 @@ -use remote_beacon_node::RemoteBeaconNode; -use rest_types::SyncingResponse; -use slog::{debug, error, Logger}; +use eth2::BeaconNodeHttpClient; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; -use types::EthSpec; /// A distance in slots. const SYNC_TOLERANCE: u64 = 4; @@ -17,19 +15,19 @@ const SYNC_TOLERANCE: u64 = 4; /// /// The second condition means the even if the beacon node thinks that it's syncing, we'll still /// try to use it if it's close enough to the head. -pub async fn is_synced( - beacon_node: &RemoteBeaconNode, +pub async fn is_synced( + beacon_node: &BeaconNodeHttpClient, slot_clock: &T, log_opt: Option<&Logger>, ) -> bool { - let resp = match beacon_node.http.node().syncing_status().await { + let resp = match beacon_node.get_node_syncing().await { Ok(resp) => resp, Err(e) => { if let Some(log) = log_opt { error!( log, "Unable connect to beacon node"; - "error" => format!("{:?}", e) + "error" => e.to_string() ) } @@ -37,44 +35,38 @@ pub async fn is_synced( } }; - match &resp { - SyncingResponse { - is_syncing: false, .. 
- } => true, - SyncingResponse { - is_syncing: true, - sync_status, - } => { - if let Some(log) = log_opt { - debug!( - log, - "Beacon node sync status"; - "status" => format!("{:?}", resp), - ); - } + let is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + + if let Some(log) = log_opt { + if !is_synced { + debug!( + log, + "Beacon node sync status"; + "status" => format!("{:?}", resp), + ); - let now = if let Some(slot) = slot_clock.now() { - slot - } else { - // There's no good reason why we shouldn't be able to read the slot clock, so we'll - // indicate we're not synced if that's the case. - return false; - }; + warn!( + log, + "Beacon node is syncing"; + "msg" => "not receiving new duties", + "sync_distance" => resp.data.sync_distance.as_u64(), + "head_slot" => resp.data.head_slot.as_u64(), + ); + } - if sync_status.current_slot + SYNC_TOLERANCE >= now { - true - } else { - if let Some(log) = log_opt { - error!( - log, - "Beacon node is syncing"; - "msg" => "not receiving new duties", - "target_slot" => sync_status.highest_slot.as_u64(), - "current_slot" => sync_status.current_slot.as_u64(), - ); - } - false + if let Some(local_slot) = slot_clock.now() { + let remote_slot = resp.data.head_slot + resp.data.sync_distance; + if remote_slot + 1 < local_slot || local_slot + 1 < remote_slot { + error!( + log, + "Time discrepancy with beacon node"; + "msg" => "check the system time on this host and the beacon node", + "beacon_node_slot" => remote_slot, + "local_slot" => local_slot, + ); } } } + + is_synced } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 220d82a66ae..5b51bf57b83 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -7,6 +7,7 @@ mod fork_service; mod initialized_validators; mod is_synced; mod notifier; +mod validator_duty; mod validator_store; pub use cli::cli_app; @@ -18,17 +19,18 @@ use block_service::{BlockService, BlockServiceBuilder}; use 
clap::ArgMatches; use duties_service::{DutiesService, DutiesServiceBuilder}; use environment::RuntimeContext; +use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Url}; use fork_service::{ForkService, ForkServiceBuilder}; use futures::channel::mpsc; use initialized_validators::InitializedValidators; use notifier::spawn_notifier; -use remote_beacon_node::RemoteBeaconNode; use slog::{error, info, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; +use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{delay_for, Duration}; -use types::EthSpec; +use types::{EthSpec, Hash256, YamlConfig}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. @@ -60,7 +62,7 @@ impl ProductionValidatorClient { /// Instantiates the validator client, _without_ starting the timers to trigger block /// and attestation production. - pub async fn new(mut context: RuntimeContext, config: Config) -> Result { + pub async fn new(context: RuntimeContext, config: Config) -> Result { let log = context.log().clone(); info!( @@ -90,9 +92,10 @@ impl ProductionValidatorClient { let validators = InitializedValidators::from_definitions( validator_defs, config.data_dir.clone(), - config.strict_lockfiles, + config.delete_lockfiles, log.clone(), ) + .await .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; info!( @@ -102,77 +105,35 @@ impl ProductionValidatorClient { "enabled" => validators.num_enabled(), ); + let beacon_node_url: Url = config + .http_server + .parse() + .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; + let beacon_node_http_client = ClientBuilder::new() + .timeout(HTTP_TIMEOUT) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; let beacon_node = - RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) - .map_err(|e| format!("Unable to init beacon node http client: {}", e))?; - - // TODO: 
check if all logs in wait_for_node are produed while awaiting - let beacon_node = wait_for_node(beacon_node, &log).await?; - let eth2_config = beacon_node - .http - .spec() - .get_eth2_config() - .await - .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; - let genesis_time = beacon_node - .http - .beacon() - .get_genesis_time() - .await - .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {:?}", e))?; - let genesis = Duration::from_secs(genesis_time); - - // If the time now is less than (prior to) genesis, then delay until the - // genesis instant. - // - // If the validator client starts before genesis, it will get errors from - // the slot clock. - if now < genesis { - info!( - log, - "Starting node prior to genesis"; - "seconds_to_wait" => (genesis - now).as_secs() - ); - - delay_for(genesis - now).await - } else { - info!( - log, - "Genesis has already occurred"; - "seconds_ago" => (now - genesis).as_secs() + BeaconNodeHttpClient::from_components(beacon_node_url, beacon_node_http_client); + + // Perform some potentially long-running initialization tasks. + let (yaml_config, genesis_time, genesis_validators_root) = tokio::select! { + tuple = init_from_beacon_node(&beacon_node, &context) => tuple?, + () = context.executor.exit() => return Err("Shutting down".to_string()) + }; + let beacon_node_spec = yaml_config.apply_to_chain_spec::(&T::default_spec()) + .ok_or_else(|| + "The minimal/mainnet spec type of the beacon node does not match the validator client. \ + See the --testnet command.".to_string() + )?; + + if context.eth2_config.spec != beacon_node_spec { + return Err( + "The beacon node is using a different Eth2 specification to this validator client. \ + See the --testnet command." 
+ .to_string(), ); } - let genesis_validators_root = beacon_node - .http - .beacon() - .get_genesis_validators_root() - .await - .map_err(|e| { - format!( - "Unable to read genesis validators root from beacon node: {:?}", - e - ) - })?; - - // Do not permit a connection to a beacon node using different spec constants. - if context.eth2_config.spec_constants != eth2_config.spec_constants { - return Err(format!( - "Beacon node is using an incompatible spec. Got {}, expected {}", - eth2_config.spec_constants, context.eth2_config.spec_constants - )); - } - - // Note: here we just assume the spec variables of the remote node. This is very useful - // for testnets, but perhaps a security issue when it comes to mainnet. - // - // A damaging attack would be for a beacon node to convince the validator client of a - // different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being - // produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant - // for Lighthouse. - context.eth2_config = eth2_config; let slot_clock = SystemTimeSlotClock::new( context.eth2_config.spec.genesis_slot, @@ -246,7 +207,10 @@ impl ProductionValidatorClient { self.duties_service .clone() - .start_update_service(block_service_tx, &self.context.eth2_config.spec) + .start_update_service( + block_service_tx, + Arc::new(self.context.eth2_config.spec.clone()), + ) .map_err(|e| format!("Unable to start duties service: {}", e))?; self.fork_service @@ -270,22 +234,86 @@ impl ProductionValidatorClient { } } +async fn init_from_beacon_node( + beacon_node: &BeaconNodeHttpClient, + context: &RuntimeContext, +) -> Result<(YamlConfig, u64, Hash256), String> { + // Wait for the beacon node to come online. + wait_for_node(beacon_node, context.log()).await?; + + let yaml_config = beacon_node + .get_config_spec() + .await + .map_err(|e| format!("Unable to read spec from beacon node: {:?}", e))? 
+ .data; + + let genesis = loop { + match beacon_node.get_beacon_genesis().await { + Ok(genesis) => break genesis.data, + Err(e) => { + // A 404 error on the genesis endpoint indicates that genesis has not yet occurred. + if e.status() == Some(StatusCode::NOT_FOUND) { + info!( + context.log(), + "Waiting for genesis"; + ); + } else { + error!( + context.log(), + "Error polling beacon node"; + "error" => format!("{:?}", e) + ); + } + } + } + + delay_for(RETRY_DELAY).await; + }; + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; + let genesis_time = Duration::from_secs(genesis.genesis_time); + + // If the time now is less than (prior to) genesis, then delay until the + // genesis instant. + // + // If the validator client starts before genesis, it will get errors from + // the slot clock. + if now < genesis_time { + info!( + context.log(), + "Starting node prior to genesis"; + "seconds_to_wait" => (genesis_time - now).as_secs() + ); + + delay_for(genesis_time - now).await; + } else { + info!( + context.log(), + "Genesis has already occurred"; + "seconds_ago" => (now - genesis_time).as_secs() + ); + } + + Ok(( + yaml_config, + genesis.genesis_time, + genesis.genesis_validators_root, + )) +} + /// Request the version from the node, looping back and trying again on failure. Exit once the node /// has been contacted. -async fn wait_for_node( - beacon_node: RemoteBeaconNode, - log: &Logger, -) -> Result, String> { +async fn wait_for_node(beacon_node: &BeaconNodeHttpClient, log: &Logger) -> Result<(), String> { // Try to get the version string from the node, looping until success is returned. 
loop { let log = log.clone(); let result = beacon_node - .clone() - .http - .node() - .get_version() + .get_node_version() .await - .map_err(|e| format!("{:?}", e)); + .map_err(|e| format!("{:?}", e)) + .map(|body| body.data.version); match result { Ok(version) => { @@ -295,7 +323,7 @@ async fn wait_for_node( "version" => version, ); - return Ok(beacon_node); + return Ok(()); } Err(e) => { error!( diff --git a/validator_client/src/validator_duty.rs b/validator_client/src/validator_duty.rs new file mode 100644 index 00000000000..5119188d581 --- /dev/null +++ b/validator_client/src/validator_duty.rs @@ -0,0 +1,130 @@ +use eth2::{ + types::{BeaconCommitteeSubscription, StateId, ValidatorId}, + BeaconNodeHttpClient, +}; +use serde::{Deserialize, Serialize}; +use types::{CommitteeIndex, Epoch, PublicKey, PublicKeyBytes, Slot}; + +/// This struct is being used as a shim since we deprecated the `rest_api` in favour of `http_api`. +/// +/// Tracking issue: https://github.com/sigp/lighthouse/issues/1643 +// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct ValidatorDuty { + /// The validator's BLS public key, uniquely identifying them. + pub validator_pubkey: PublicKey, + /// The validator's index in `state.validators` + pub validator_index: Option, + /// The slot at which the validator must attest. + pub attestation_slot: Option, + /// The index of the committee within `slot` of which the validator is a member. + pub attestation_committee_index: Option, + /// The position of the validator in the committee. + pub attestation_committee_position: Option, + /// The committee count at `attestation_slot`. + pub committee_count_at_slot: Option, + /// The number of validators in the committee. + pub committee_length: Option, + /// The slots in which a validator must propose a block (can be empty). 
+ /// + /// Should be set to `None` when duties are not yet known (before the current epoch). + pub block_proposal_slots: Option>, +} + +impl ValidatorDuty { + /// Instantiate `Self` as if there are no known dutes for `validator_pubkey`. + fn no_duties(validator_pubkey: PublicKey) -> Self { + ValidatorDuty { + validator_pubkey, + validator_index: None, + attestation_slot: None, + attestation_committee_index: None, + attestation_committee_position: None, + committee_count_at_slot: None, + committee_length: None, + block_proposal_slots: None, + } + } + + /// Instantiate `Self` by performing requests on the `beacon_node`. + /// + /// Will only request proposer duties if `current_epoch == request_epoch`. + pub async fn download( + beacon_node: &BeaconNodeHttpClient, + current_epoch: Epoch, + request_epoch: Epoch, + pubkey: PublicKey, + ) -> Result { + let pubkey_bytes = PublicKeyBytes::from(&pubkey); + + let validator_index = if let Some(index) = beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey_bytes.clone()), + ) + .await + .map_err(|e| format!("Failed to get validator index: {}", e))? + .map(|body| body.data.index) + { + index + } else { + return Ok(Self::no_duties(pubkey)); + }; + + if let Some(attester) = beacon_node + .get_validator_duties_attester(request_epoch, Some(&[validator_index])) + .await + .map_err(|e| format!("Failed to get attester duties: {}", e))? + .data + .first() + { + let block_proposal_slots = if current_epoch == request_epoch { + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + .map_err(|e| format!("Failed to get proposer indices: {}", e))? 
+ .data + .into_iter() + .filter(|data| data.pubkey == pubkey_bytes) + .map(|data| data.slot) + .collect() + } else { + vec![] + }; + + Ok(ValidatorDuty { + validator_pubkey: pubkey, + validator_index: Some(attester.validator_index), + attestation_slot: Some(attester.slot), + attestation_committee_index: Some(attester.committee_index), + attestation_committee_position: Some(attester.validator_committee_index as usize), + committee_count_at_slot: Some(attester.committees_at_slot), + committee_length: Some(attester.committee_length), + block_proposal_slots: Some(block_proposal_slots), + }) + } else { + Ok(Self::no_duties(pubkey)) + } + } + + /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`. + pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool { + self.validator_pubkey == other.validator_pubkey + && self.validator_index == other.validator_index + && self.attestation_slot == other.attestation_slot + && self.attestation_committee_index == other.attestation_committee_index + && self.attestation_committee_position == other.attestation_committee_position + && self.committee_count_at_slot == other.committee_count_at_slot + } + + /// Generate a subscription for `self`, if `self` has appropriate attestation duties. + pub fn subscription(&self, is_aggregator: bool) -> Option { + Some(BeaconCommitteeSubscription { + validator_index: self.validator_index?, + committee_index: self.attestation_committee_index?, + committees_at_slot: self.committee_count_at_slot?, + slot: self.attestation_slot?, + is_aggregator, + }) + } +}