From 528b27d66b129aad30e2abbd9214f99e233b145f Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 23 Sep 2020 22:59:43 +0000 Subject: [PATCH 01/33] Update key derivation to latest EIP-2333 (#1633) ## Issue Addressed #1624 ## Proposed Changes Updates to match [EIP-2333](`https://eips.ethereum.org/EIPS/eip-2333`) ## Additional Info In order to have compatibility with the eth2.0-deposit-cli, [this PR](https://github.com/ethereum/eth2.0-deposit-cli/pull/108) must also be merged --- crypto/eth2_key_derivation/src/derived_key.rs | 1080 +++++++++-------- .../tests/eip2333_vectors.rs | 16 +- 2 files changed, 559 insertions(+), 537 deletions(-) diff --git a/crypto/eth2_key_derivation/src/derived_key.rs b/crypto/eth2_key_derivation/src/derived_key.rs index 74dfcfbf3d8..8ed6c9bd44d 100644 --- a/crypto/eth2_key_derivation/src/derived_key.rs +++ b/crypto/eth2_key_derivation/src/derived_key.rs @@ -2,6 +2,7 @@ use crate::{lamport_secret_key::LamportSecretKey, secret_bytes::SecretBytes, Zer use num_bigint_dig::BigUint; use ring::hkdf::{KeyType, Prk, Salt, HKDF_SHA256}; use sha2::{Digest, Sha256}; +use std::convert::TryFrom; use zeroize::Zeroize; /// The byte size of a SHA256 hash. @@ -21,7 +22,7 @@ pub const R: &str = "52435875175126190479447740508185965837690552500527637822603 /// /// In EIP-2333 this value is defined as: /// -/// `ceil((1.5 * ceil(log2(r))) / 8)` +/// `ceil((3 * ceil(log2(r))) / 16)` pub const MOD_R_L: usize = 48; /// A BLS secret key that is derived from some `seed`, or generated as a child from some other @@ -81,9 +82,30 @@ fn derive_child_sk(parent_sk: &[u8], index: u32) -> ZeroizeHash { /// /// Equivalent to `HKDF_mod_r` in EIP-2333. 
fn hkdf_mod_r(ikm: &[u8]) -> ZeroizeHash { - let prk = hkdf_extract(b"BLS-SIG-KEYGEN-SALT-", ikm); - let okm = &hkdf_expand(prk, MOD_R_L); - mod_r(okm.as_bytes()) + // ikm = ikm + I2OSP(0,1) + let mut ikm_with_postfix = SecretBytes::zero(ikm.len() + 1); + ikm_with_postfix.as_mut_bytes()[..ikm.len()].copy_from_slice(ikm); + + // info = "" + I2OSP(L, 2) + let info = u16::try_from(MOD_R_L) + .expect("MOD_R_L too large") + .to_be_bytes(); + + let mut output = ZeroizeHash::zero(); + let zero_hash = ZeroizeHash::zero(); + + let mut salt = b"BLS-SIG-KEYGEN-SALT-".to_vec(); + while output.as_bytes() == zero_hash.as_bytes() { + let mut hasher = Sha256::new(); + hasher.update(salt.as_slice()); + salt = hasher.finalize().to_vec(); + + let prk = hkdf_extract(&salt, ikm_with_postfix.as_bytes()); + let okm = &hkdf_expand(prk, &info, MOD_R_L); + + output = mod_r(okm.as_bytes()); + } + output } /// Interprets `bytes` as a big-endian integer and returns that integer modulo the order of the @@ -145,7 +167,7 @@ fn parent_sk_to_lamport_pk(ikm: &[u8], index: u32) -> ZeroizeHash { /// Equivalent to `IKM_to_lamport_SK` in EIP-2333. fn ikm_to_lamport_sk(salt: &[u8], ikm: &[u8]) -> LamportSecretKey { let prk = hkdf_extract(salt, ikm); - let okm = hkdf_expand(prk, HASH_SIZE * LAMPORT_ARRAY_SIZE as usize); + let okm = hkdf_expand(prk, &[], HASH_SIZE * LAMPORT_ARRAY_SIZE as usize); LamportSecretKey::from_bytes(okm.as_bytes()) } @@ -159,7 +181,7 @@ fn hkdf_extract(salt: &[u8], ikm: &[u8]) -> Prk { /// Peforms a `HKDF-Expand` on the `pkr` (pseudo-random key), returning `l` bytes. /// /// Defined in [RFC5869](https://tools.ietf.org/html/rfc5869). 
-fn hkdf_expand(prk: Prk, l: usize) -> SecretBytes { +fn hkdf_expand(prk: Prk, info: &[u8], l: usize) -> SecretBytes { struct ExpandLen(usize); impl KeyType for ExpandLen { @@ -169,7 +191,7 @@ fn hkdf_expand(prk: Prk, l: usize) -> SecretBytes { } let mut okm = SecretBytes::zero(l); - prk.expand(&[], ExpandLen(l)) + prk.expand(&[info], ExpandLen(l)) .expect("expand len is constant and cannot be too large") .fill(okm.as_mut_bytes()) .expect("fill len is constant and cannot be too large"); @@ -307,528 +329,528 @@ mod test { /// Returns the copy-paste values from the spec. fn get_raw_vector() -> RawTestVector { RawTestVector { - seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: - "12513733877922233913083619867448865075222526338446857121953625441395088009793", - child_index: 0, - lamport_0: vec![ - "0x7b4a587eac94d7f56843e718a04965d4832ef826419b4001a3ad0ba77eb44a3b", - "0x90f45a712112122429412921ece5c30eb2a6daf739dc9034fc79424daeb5eff6", - "0xd061c2799de00b2be90eb1cc295f4c31e22d4b45c59a9b9b2554379bea7783cb", - "0x3ad17e4cda2913b5180557fbe7db04b5ba440ce8bb035ae27878d66fbfa50d2c", - "0xf5b954490933ad47f8bf612d4a4f329b3aa8914b1b83d59e15e271e2a087e002", - "0x95d68d505bf4ff3e5149bc5499cf4b2f00686c674a29a8d903f70e569557d867", - "0x1b59c76d9bb2170b220a87833582ede5970d4a336d91c99a812825afe963e056", - "0x4310ff73cfbbf7b81c39ecbf1412da33e9388c1a95d71a75e51fe12256551ceb", - "0xee696343f823e5716e16747f3bbae2fc6de233fe10eea8e45b4579018da0874f", - "0xae12a437aaa7ae59f7d8328944b6a2b973a43565c55d5807dc2faf223a33aa73", - "0x2a3ae0b47f145bab629452661ff7741f111272e33ec571030d0eb222e1ed1390", - "0x1a3ea396e8cbd1d97733ef4753d6840b42c0795d2d693f18e6f0e7b3fff2beb2", - "0x472429d0643c888bfdfe6e6ccfdeee6d345d60c6710859ac29fc289fd3656347", - "0xa32d4d955949b8bed0eb20f586d8fd516d6ddec84fbbc36998d692633c349822", - "0xe5ac8ac5ee1d40e53a7abf36e8269d5d5fce450a87feae8e59f432a44bcc7666", - 
"0xddf9e497ed78032fbd72d9b8abd5204d81c3475f29afa44cdf1ded8ea72dd1dc", - "0x945c62e88fb1e5f3c15ff57cd5eb1586ee93ec5ec80154c5a9c50241c5adae0a", - "0xc8868b50fc8423c96b7efa1ede4d3203a6b835dbeb6b2ababc58397e6b31d9dd", - "0x66de9bd86b50e2b6a755310520af655759c1753bff34b79a5cd63d6811fc8c65", - "0x5b13786c6068df7735343e5591393bea8aee92ac5826d6132bf4f5ebf1098776", - "0xa2038fc7d8e3cb2eda2bd303cfa76a9e5d8b88293918bec8b2fc03be75684f14", - "0x47a13f6b2308a50eded830fdee7c504bf49d1fe6a95e337b0825d0d77a520129", - "0xb534cdddcf1aa1c6b4cbba46d1db31b766d958e0a0306450bc031d1e3ed79d97", - "0x54aa051b754c31658377f7bff00b7deaa861e74cb12e1eb84216666e19b23d69", - "0x0220d57f63435948818eb376367b113c188e37451c216380f65d1ad55f73f527", - "0xf9dd2e391565534a4db84980433bf5a56250f45fe294fce2679bcf115522c081", - "0x1166591ee2ca59b9f4e525900f085141be8879c66ef18529968babeb87c44814", - "0xf4fa2e8de39bdbeb29b64d8b440d3a6c9a6ca5bdce543877eaee93c11bd70ab8", - "0x07f466d73b93db283b3f7bfaf9c39ae296adc376ab307ef12312631d0926790e", - "0xb2ecff93acb4fa44c1dbf8464b81734a863b6d7142b02f5c008907ea4dc9aaa1", - "0xa1d9c342f6c293ac6ef8b5013cba82c4bad6ed7024d782948cb23cd490039ba1", - "0xc7d04a639ba00517ece4dbc5ef4aaf20e0ccde6e4a24c28936fabe93dec594db", - "0xe3cbb9810472d9dd1cdb5eed2f74b67ea60e973d2d2e897bd64728c9b1aa0679", - "0xe36884703413958ff2aba7a1f138a26d0ac0a371270f0169219beb00a5add5f0", - "0xe5ea300a09895b3f98de5232d92a36d5611cbcf9aaf9e7bb20cf6d1696ad1cb4", - "0xc136cda884e18175ab45148ed4f9d0d1a3c5e11ad0275058e61ae48eb151a81f", - "0x3ee1101e944c040021187e93b6e0beb1048c75fb74f3fdd67756b1c8517a311f", - "0x016964fd6fc32b9ad07a630949596715dee84d78230640368ff0929a280cf3a2", - "0xe33865fc03120b94333bb754fd097dc0f90e69ff6fd221d6aae59fcf2d762d76", - "0xe80bb3515a09ac6ecb4ec59de22701cdf954b1ae8a677fd85508c5b041f28058", - "0x3889af7cd325141ec288021ede136652a0411d20364005b9d3ca9102cb368f57", - "0x18dad0bc975cf8800addd54c7867389d3f7fe1b97d348bd8412a6cbfb75c520a", - 
"0x09035218686061ee91bd2ad57dc6fb6da7243b8177a153484524b2b228da5314", - "0x688fd7a97551c64eae33f91abb073a46eafbbacd5595c6bac2e57dd536acdfe2", - "0x1fc164dce565a1d0da59cc8048b334cc5eb84bf04de2399ddb847c22a7e32ab7", - "0xa2a340ba05c8a30dd1cab886a926b761758eba0e41b5c4c5dfd4a42f249655c1", - "0xc43dffe01479db836a6a1a74564b297fad0d69c6b06cf593f6db9f26b4f307d5", - "0x73cef7f3ff724a30a79e1dca74cef74954afeefa2e476c4dec65afe50c16c5c4", - "0xa54002253ab7b95cc5b664b3f08976400475cc56f170b939f6792e730ff5170b", - "0x9ade43053d41afebc002f09476dffd1b13ecbf67f810791540b92ca56d5e63e4", - "0x234e7cbfbe45b22a871db26738fa05de09213a925439d7f3e5108132e521b280", - "0x066b712417332c7cfca871fb1bb5839f0341acf9266229603a3eddbc8a93b59f", - "0xb5857acdcf636330da2cfcc99c81d9fdbd20c506a3c0e4f4f6a139d2a64f051c", - "0xe119908a150a49704b6bbba2c470cd619a0ae10dd9736e8d491890e3c8509fff", - "0xb8a5c5dbb51e6cb73cca95b4ad63ea3c7399cd16b05ab6261535495b3af2ca51", - "0x05624a1d4d2d2a31160bc48a6314bbf13eaddf56cddb0f0aa4ed3fb87f8b479f", - "0x483daceff1c3baa0ed0f3be7e534eebf5f4aed424ecd804edfbf5c56b3476b50", - "0x424d04694e7ae673707c77eb1c6d0996d250cfab6832ee3506a12e0384a3c5c9", - "0xa11fed0ed8057966bfe7136a15a814d06a516fbc9d44aeef87c509137a26190e", - "0x3694d22d1bc64658f3adbe2cc9f1716aee889066e0950e0b7a2fd576ed36bb76", - "0x49a13000a87f39f93d0ae9c3a4cfccbf440c0a75cce4c9d70dac627b6d6958b3", - "0xb3ff0cdd878d5ac1cb12e7d0b300d649fdd008800d498ae4f9fbf9510c74249a", - "0xe52a867cfb87d2fe7102d23d8d64925f7b75ca3f7d6bb763f7337352c255e0be", - "0x6513b372e4e557cca59979e48ec27620e9d7cdb238fcf4a9f19c3ba502963be0", - "0x9f69d82d4d51736902a987c8b5c30c2b25a895f2af5d2c846667ff6768bcc774", - "0x049a220dbe3340749f94643a429cb3cba3c92b561dc756a733d652d838728ab3", - "0x4fa2cd877aa115b476082b11053309f3537fa03d9158085f5f3f4bab6083e6da", - "0xed12db4069eb9f347735816afcee3fe43d4a6999fef8240b91bf4b05447d734f", - "0x3ecbe5eda469278f68548c450836a05cc500864664c7dda9b7526f084a891032", - 
"0x690d8f928fc61949c22e18cceaa2a446f8e1b65bd2e7af9e0a8e8284134ab3d2", - "0x99e09167a09f8261e7e8571d19148b7d7a75990d0702d9d582a2e4a96ac34f8e", - "0x6d33931693ed7c2e1d080b6a37da52c279a06cec5f534305819f7adf7db0afe3", - "0xc4b735462a9a656e28a52b1d4992ea9dea826b858971d698453a4be534d6bb70", - "0xedf92b10302dc41f8d362b360f4c2ef551d50e2ded012312c964002d2afc46d7", - "0x58f6691cca081ae5c3661dd171b87cc49c90359bb03cc0e57e503f7fcf14aefc", - "0x5d29b8b4ee295a73c4a8618927b3d14b76c7da049133a2257192b10be8c17a6a", - "0x646802fa42801e0ae24011fb4f62e87219ef1da01f7fc14bf8d6bd2d9e7c21f1", - "0x23abf45eee65cc4c1e95ccab42ad280a00bb3b14d243e2021a684075f900141e", - "0x2b1ae95c975bf9c387eae506fdb5e58afd2d198f00a21cd3fddb5855e8021e4d", - "0x0ef9f6e1c0583493d343e75f9c0c557fa6da0dc12b17a96c5757292916b72ee3", - "0x04c7fc76195c64a3285af14161077c045ff6ddbb67c0ff91b080f98eb6781e5c", - "0xba12679b97027d0e7076e6d19086c07792eaa7f78350842fbef8ddf5bcd3ecc0", - "0xcead458e6799df4d2f6cbf7f13cb3afec3441a354816e3071856ed49cbdbb1a7", - "0xbe6c56256556bb5c6727a1d9cb641d969677f56bb5ad7f8f7a7c9cfd128427b4", - "0xc80f11963ff40cb1888054b83c0463d32f737f2e7d42098e639023db0dfc84d4", - "0xac80006c1296bcfde86697efebb87fb0fddfb70dd34dd2ee4c152482af4687eb", - "0xbb7d13ce184249df4576fc3d13351e1683500e48726cd4198423f14f9094068b", - "0x1b2d9c40c55bd7362664fa46c1268e094d56c8193e3d991c08dc7a6e4ca14fa1", - "0x9bd236254d0565f5b2d24552d4b4d732de43b0adaa64ecd8be3efc6508577591", - "0x38078cefccc04e8312d79e0636e0e3157434c50a2ad4e3e87cc6584c41eec8b5", - "0xb5d15a8527ff3fa254ba61ffceb02d2570b53361894f351a9e839c0bb716857d", - "0x6763dad684bf2e914f40ae0a7ee0cdf12c97f41fc05a485d5991b4daad21a3f8", - "0xc80363c20df589333ecbe05bd5f2c19942ebc2593626dc50d00835c40fb8d005", - "0x48502b56ae93acd2794f847cbe825525d5d5f59f0f75c67aff84e5338776b3af", - "0xfd8e033493ba8af264a855a78ab07f37d936351d2879b95928909ed8df1b4f91", - "0x11f75bee9eac7356e65ebc7f004ccdc1da80807380d69143293d1421f50b1c97", - 
"0x903a88a3ebe84ca1c52a752b1faffa9ca1daedac9cbf1aa70942efc9beb44b79", - "0x2c0dcd68837f32a69da651045ad836b8cd6b48f2c8c5d73a3bd3bba6148d345a", - "0x0aa0f49b3476f3fdb6393f2ab601e0009586090b72ee54a525734f51598960d5", - "0xf7a789f013f702731656c562caa15b04cb7c9957376c4d80b8839167bb7fa626", - "0x4e0be1b19e305d82db3fd8affd67b0d2559da3edbfb08d19632a5cc46a90ed07", - "0x3caaccfc546d84d543eaf4f4c50c9c8fd831c12a8de56fdb9dfd04cc082882fe", - "0x894f6a01fd34f0642077e22981752011678548eb70eb55e8072c1caffc16fe02", - "0xae7eb54adaa68679348ea3537a49be669d1d61001fbab9fac259ba727dbc9a1a", - "0x291a1cbdceff957b5a65440ab67fb8672de881230fe3108a15ca487c2662c2c7", - "0x891d43b867137bf8beb9df4da2d951b5984a266a8cd74ec1593801d005f83f08", - "0xc558407f6491b37a10835e0ad7ce74f4e368aa49157a28873f7229310cb2d7fd", - "0x9ce061b0a072e1fe645f3479dac089b5bfb78cfa6cfbe5fd603bcdb504711315", - "0xa8e30d07b09275115dd96472ecf9bc316581caf307735176ca226d4cd9022925", - "0x918ee6d2efba7757266577691203f973cf4f4cac10f7d5f86acd2a797ff66583", - "0xfa31ba95e15d1635d087522f3d0da9cf7acac4ed6d0ac672654032a3c39244a6", - "0xf2952b58f015d6733af06938cd1f82fbddb3b796823bee7a3dbffa04efc117c2", - "0x46f8f742d3683de010ede528128d1181e8819f4252474f51371a177bfa518fa4", - "0x4ca1cc80094f2910cf83a9e65ad70e234690ffb9142793911ec7cf71663545b3", - "0x381965037b5725c71bfa6989d4c432f6611de8e8ec387f3cfc0dcb1a15191b73", - "0x2562b88ed3b86ba188be056805a3b7a47cb1a3f630d0e2f39647b0792ec6b7d8", - "0x565f6d14e7f22724f06d40f54465ad40d265b6de072b34a09d6e37a97a118cd8", - "0xc2982c861ad3278063b4a5f584eaf866db684cc4e712d64230fc9ee33bb4253b", - "0xfd806c91927e549d8d400ab7aa68dbe60af988fbabf228483ab0c8de7dab7eee", - "0xafae6ff16c168a3a3b5c2f1742d3f89fa4777c4bd0108f174014debf8f4d629c", - "0xaf5a4be694de5e53632be9f1a49bd582bf76002259460719197079c8c4be7e66", - "0xa8df4a4b4c5bf7a4498a11186f8bb7679137395f28e5c2179589e1c1f26504b5", - "0xce8b77c64c646bb6023f3efaed21ca2e928e21517422b124362cf8f4d9667405", - 
"0x62e67a8c423bc6c6c73e6cd8939c5c1b110f1a38b2ab75566988823762087693", - "0x7e778f29937daaa272d06c62d6bf3c9c0112d45a3df1689c602d828b5a315a9f", - "0xe9b5abd46c2377e602ff329050afa08afe152f4b0861db8a887be910ff1570bf", - "0xa267b1b2ccd5d96ae8a916b0316f06fafb886b3bb41286b20763a656e3ca0052", - "0xb8ed85a67a64b3453888a10dedf4705bd27719664deff0996a51bb82bc07194f", - "0x57907c3c88848f9e27bc21dd8e7b9d61de48765f64d0e943e7a6bb94cc2021ab", - "0xd2f6f1141a3b76bf9bf581d49091142944c7f9f323578f5bdd5522ba32291243", - "0xc89f104200ed4c5d5f7046d99e68ae6f8ec31e2eeceb568eb05087e3aa546a74", - "0xc9f367fae45c39299693b134229bb6dd0da112fd1a7d19b7f4772c01e5cbe479", - "0x64e2d4ad51948764dd578d26357e29e8e4d076d65c05cffdf8211b624fefe9ac", - "0xf9a9b4e6d5be7fc051df8ecd9c389d16b1af86c749308e6a23f7ff4871f0ba9a", - "0x0d2b2a228b86ebf9499e1bf7674335087ced2eb35ce0eb90954a0f75751a2bf4", - "0xff8531b45420a960d6e48ca75d77758c25733abde83cd4a6160beae978aa735e", - "0xd6d412bd1cb96a2b568d30e7986b7e8994ca92fd65756a758295499e11ea52b6", - "0xad8533fccbecdd4a0b00d648bfe992360d265f7be70c41d9631cefad5d4fe2f6", - "0x31fbf2afb8d5cc896d517cfc5201ee24527e8d283f9c37ca10233bef01000a20", - "0x2fd67b7365efc258131eb410f46bf3b1cbd3e9c76fd6e9c3e86c9ff1054116ff", - "0xab6aa29f33d18244be26b23abadb39679a8aa56dafc0dd7b87b672df5f5f5db6", - "0xbad3b0f401ca0a53a3d465de5cecd57769ec9d4df2c04b78f8c342a7ed35bbee", - "0xbdc24d46e471835d83ce8c5b9ecbe675aab2fd8f7831c548e8efd268c2ee2232", - "0x87265fabd7397d08f0729f13a2f3a25bbc8c874b6b50f65715c92b62f665f925", - "0xa379fd268e7ff392c067c2dd823996f72714bf3f936d5eeded71298859f834cb", - "0xf3ab452c9599ebfbb234f72a86f3062aed12ae1f634abbe542ff60f5cefc1fcf", - "0x2b17ebb053a3034c07da36ed2ba42c25ad8e61dec87b5527f5e1c755eb55405a", - "0x305b40321bd67bf48bfd121ee4d5d347268578bd4b8344560046594771a11129", - "0xe7029c9bea020770d77fe06ca53b521b180ad6a9e747545aadc1c74beef7241c", - "0xabc357cec0f4351a5ada22483d3b103890392f8d8f9cb8073a61969ed1be4e08", - 
"0x97f88c301946508428044d05584dc41af2e6a0de946de7d7f5269c05468afe20", - "0xbdc08fe8d6f9a05ad8350626b622ad8eec80c52331d154a3860c98676719cfbd", - "0x161590fc9f7fcf4eaba2f950cf588e6da79e921f139d3c2d7ebe017003a4799e", - "0x91b658db75bc3d1954bfde2ef4bc12980ff1688e09d0537f170c9ab47c162320", - "0x76d995f121406a63ce26502e7ec2b653c221cda357694a8d53897a99e6ce731e", - "0x3d6b2009586aceb7232c01259bb9428523c02b0f42c2100ec0d392418260c403", - "0x14ca74ecbc8ec0c67444c6cb661a2bce907aa2a1453b11f16002b815b94a1c49", - "0x553b4dc88554ebe7b0a3bd0813104fd1165a1f950ceace11f5841aa74b756d85", - "0x4025bf4ad86751a156d447ce3cabafde9b688efcdafd8aa4be69e670f8a06d9e", - "0x74260cf266997d19225e9a0351a9acfa17471fccdf5edc9ccc3bb0d23ef551c5", - "0xf9dbca3e16d234e448cf03877746baeb62a8a25c261eff42498b1813565c752a", - "0x2652ec98e05c1b6920fb6ddc3b57e366d514ffa4b35d068f73b5603c47f68f2f", - "0x83f090efeb36db91eb3d4dfbb17335c733fce7c64317d0d3324d7caaaf880af5", - "0x1e86257f1151fb7022ed9ed00fb961a9a9989e58791fb72043bb63ed0811791c", - "0xd59e4dcc97cba88a48c2a9a2b29f79125099a39f74f4fb418547de8389cd5d15", - "0x875a19b152fe1eb3fe1de288fa9a84864a84a79bac30b1dbd70587b519a9770e", - "0x9c9dc2d3c8f2f6814cfc61b42ee0852bbaf3f523e0409dd5df3081b750a5b301", - "0xf6f7f81c51581c2e5861a00b66c476862424151dd750efeb20b7663d552a2e94", - "0x723fcb7ca43a42483b31443d4be9b756b34927176f91a391c71d0b774c73a299", - "0x2b02d8acf63bc8f528706ed4d5463a58e9428d5b71d577fd5daa13ba48ac56cf", - "0x2ff6911f574c0f0498fc6199da129446b40fca35ccbf362bc76534ba71c7ca22", - "0x1ef4b959b11bc87b11e4a5f84b4d757c6bdcfad874acec9a6c9eee23dc4bbe1b", - "0x68e2df9f512be9f64b7e3a2dee462149dac50780073d78b569a20256aea5f751", - "0xd1a3682e12b90ae1eab27fc5dc2aef3b8e4dbb813925e9a91e58d6c9832767b6", - "0x75778ccc102d98c5e0b4b83f7d4ef7fe8bc7263cc3317723001cb0b314d1e9e8", - "0xc7f44e2cead108dc167f0036ac8a278d3549cc3dd5cc067d074ccad9b1d9f8d4", - "0x4cba0223c5df2796b0ee9fbc084d69f10e6aedda8f0cf86171bebb156ede676c", - 
"0x628deda825661f586a5713e43c806fdd55e1a53fbe90a4ddb5f3786570740954", - "0xfc82a253bc7e0ac96252b238fbb411a54e0adf78d089f804a7fc83a4959b401e", - "0x72a6491f5daae0ceb85b61a5ed69009dd2a167c64cb35cabf38b846e27268e9d", - "0xee139a913d4fcf25ba54bb36fc8051b91f2ec73ba820cc193c46fb2f7c37a106", - "0x7f75021f2b1d0c78859478e27f6f40646b5776c060f1a5f6f0944c840a0121f8", - "0x5b60a1b78feca1d2602ac8110d263ad6b3663cbf49e6bdc1077b4b80af2feb6f", - "0xd61f15d80b1e88469b6a76ed6a6a2b94143b6acc3bd717357264818f9f2d5c6d", - "0xea85da1780b3879a4d81b685ba40b91c060866abd5080b30fbbb41730724a7dd", - "0xb9b9da9461e83153f3ae0af59fbd61febfde39eb6ac72db5ed014797495d4c26", - "0xf737762fe8665df8475ff341b3762aaeb90e52974fe5612f5efd0fc1c409d7f8", - "0xaaa25d934a1d5aa6b2a1863704d7a7f04794ed210883582c1f798be5ca046cf7", - "0x932f46d0b6444145221b647f9d3801b6cb8b1450a1a531a959abdaacf2b5656b", - "0xf4a8b0e52f843ad27635c4f5a467fbf98ba06ba9a2b93a8a97170b5c41bf4958", - "0x196ed380785ee2925307ec904161dc02a4596a55499e5b0a3897f95485b3e74a", - "0x772e829a405219e4f8cd93a1ef15c250be85c828c1e29ef6b3f7b46958a85b44", - "0xd66cfc9af9941515d788f9f5e3b56fddb92464173ddb67b83bf265e7ea502170", - "0xf5b040bfc246425278e2423b1953d8ad518de911cf04d16c67d8580a09f90e62", - "0xd2d18b2ae8a53dde14b4000e5e7e414505825f50401a3797dd8820cf510dc448", - "0xc01dcc064e644266739cd0ec7edf92fc2ef8e92e0beedf0e8aa30efcff1644fe", - "0x24720d325913ba137daf031924ad3bfaa1c8c00a53a2d048fe5667aef45efce3", - "0x70a24e1c89b3ea78d76ef458d498dcb5b8561d484853b2a8b2adcd61869857df", - "0x0ff3313997f14e1b1dcd80f1d62c58aaefb19efd7c0ea15dde21aa4e2a516e80", - "0x960c1f50062a4df851638f42c0259b6e0a0217300884f13a3c5c8d94adb34f21", - "0xb71ca7cc8578149da556131268f4625b51620dfc3a6e9fbd47f5df03afbd410e", - "0xa1a3eeec0addec7b9e15f416a07608a1b5d94f0b42d5c203b8ced03a07484f5b", - "0xa4bb8b059aa122ca4652115b83b17af80cfbea0d3e1e8979a396a667f94e85f3", - "0x31c4d2f252167fe2a4d41944224a80b2f1afaf76f8dd6a3d52d71751849e44bb", - 
"0x79642dd6a255f96c9efe569304d58c327a441448db0431aa81fe072d0d359b52", - "0x42a4b504714aba1b67defe9458fff0c8cb1f216dcab28263cef67a65693b2036", - "0xe3d2f6a9d882d0f026ef316940dfcbf131342060ea28944475fe1f56392c9ad2", - "0x986af9aeff236394a0afa83823e643e76f7624e9bfd47d5468f9b83758a86caa", - "0xafe2de6ede50ee351d63ed38d1f2ae5203174c731f41bbed95db467461ad5492", - "0x9ad40f0785fe1c8a5e4c3342b3c91987cd47a862ece6573674b52fa0456f697a", - "0xde4cde6d0fc6def3a89b79da0e01accdbec049f1c9471d13a5d59286bd679af1", - "0xecd0d1f70116d6b3ae21c57fb06ad90eed33d040e2c5c3d12714b3be934fa5ce", - "0x3c53c5bf2d1b1d4038e1f0e8a2e6d12e0d4613d5cd12562578b6909921224c10", - "0x36087382b37e9e306642cc6e867e0fb2971b6b2b28b6caf2f9c96b790e8db70a", - "0xa957496d6a4218a19998f90282d05bd93e6baabf55e55e8a5f74a933a4dec045", - "0x077d6f094e8467a21f02c67753565ec5755156015d4e86f1f82a22f9cf21c869", - "0x12dd3b1f29e1462ca392c12388a77c58044151154cf86f23873f92a99b6bb762", - "0x7fdbcdedcc02ecf16657792bd8ef4fa4adeee497f30207d4cc060eb0d528b26b", - "0x245554b12bf8edf9e9732d6e2fa50958376e355cb695515c94676e64c6e97009", - "0xccd3b1841b517f7853e35f85471710777e437a8665e352a0b61c7d7083c3babc", - "0xd970545a326dcd92e31310d1fdce3703dff8ef7c0f3411dfa74fab8b4b0763ac", - "0xd24163068918e2783f9e79c8f2dcc1c5ebac7796ce63070c364837aac91ee239", - "0x256a330055357e20691e53ca5be846507c2f02cfde09cafb5809106f0af9180e", - "0xfa446a5d1876c2051811af2a341a35dbcd3f7f8e2e4f816f501139d27dd7cd82", - "0xbafbc7a8f871d95736a41e5721605d37e7532e41eb1426897e33a72ed2f0bf1d", - "0x8055af9a105b6cf17cfeb3f5320e7dab1a6480500ff03a16c437dfec0724c290", - "0x1de6ee3e989497c1cc7ca1d16b7b01b2f336524aa2f75a823eaa1716c3a1a294", - "0x12bb9508d646dda515745d104199f71276d188b3e164083ad27dfdcdc68e290b", - "0x7ea9f9939ad4f3b44fe7b780e0587da4417c34459b2996b3a449bb5b3ff8c8cb", - "0xa88d2f8f35bc669aa6480ce82571df65fea366834670b4084910c7bb6a735dde", - "0x9486e045adb387a550b3c7a603c30e07ed8625d322d1158f4c424d30befe4a65", - 
"0xb283a70ba539fe1945be096cb90edb993fac77e8bf53616bde35cdcaa04ab732", - "0xab39a81558e9309831a2caf03e9df22e8233e20b1769f16e613debcdb8e2610f", - "0x1fc12540473fbbad97c08770c41f517ce19dc7106aa2be2e9b77867046627509", - "0xec33dbec9d655c4c581e07d1c40a587cf3217bc8168a81521b2d0021bd0ec133", - "0xc8699e3b41846bc291209bbb9c06f565f66c6ccecbf03ebc27593e798c21fe94", - "0x240d7eae209c19d453b666c669190db22db06279386aa30710b6edb885f6df94", - "0xb181c07071a750fc7638dd67e868dddbeeee8e8e0dcbc862539ee2084674a89e", - "0xb8792555c891b3cbfddda308749122a105938a80909c2013637289e115429625", - "0xfe3e9e5b4a5271d19a569fee6faee31814e55f156ba843b6e8f8dc439d60e67a", - "0x912e9ba3b996717f89d58f1e64243d9cca133614394e6ae776e2936cf1a9a859", - "0xa0671c91a21fdfd50e877afa9fe3974aa3913855a2a478ae2c242bcdb71c73d7", - "0x5b55d171b346db9ba27b67105b2b4800ca5ba06931ed6bd1bafb89d31e6472e6", - "0x68438458f1af7bd0103ef33f8bc5853fa857b8c1f84b843882d8c328c595940d", - "0x21fe319fe8c08c1d00f977d33d4a6f18aecaa1fc7855b157b653d2d3cbd8357f", - "0x23cce560bc31f68e699ece60f21dd7951c53c292b3f5522b9683eb2b3c85fc53", - "0x917fa32d172c352e5a77ac079df84401cdd960110c93aa9df51046d1525a9b49", - "0x3fc397180b65585305b88fe500f2ec17bc4dccb2ec254dbb72ffb40979f14641", - "0xf35fb569e7a78a1443b673251ac70384abea7f92432953ca9c0f31c356be9bd9", - "0x7955afa3cd34deb909cd031415e1079f44b76f3d6b0aaf772088445aaff77d08", - "0x45c0ca029356bf6ecfc845065054c06024977786b6fbfaea74b773d9b26f0e6c", - "0xe5c1dac2a6181f7c46ab77f2e99a719504cb1f3e3c89d720428d019cb142c156", - "0x677b0e575afcccf9ddefc9470e96a6cfff155e626600b660247b7121b17b030a", - "0xbeed763e9a38277efe57b834a946d05964844b1f51dba2c92a5f3b8d0b7c67d0", - "0x962b17ed1a9343d8ebfae3873162eef13734985f528ca06c90b0c1e68adfdd89", - ], - lamport_1: vec![ - "0xb3a3a79f061862f46825c00fec4005fb8c8c3462a1eb0416d0ebe9028436d3a9", - "0x6692676ce3b07f4c5ad4c67dc2cf1dfa784043a0e95dd6965e59dc00b9eaff2d", - "0xbf7b849feb312db230e6e2383681b9e35c064e2d037cbc3c9cc9cd49220e80c9", - 
"0xa54e391dd3b717ea818f5954eec17b4a393a12830e28fabd62cbcecf509c17dc", - "0x8d26d800ac3d4453c211ef35e9e5bb23d3b9ede74f26c1c417d6549c3110314d", - "0xbb8153e24a52398d92480553236850974576876c7da561651bc551498f184d10", - "0x0d30e0e203dc4197f01f0c1aba409321fbf94ec7216e47ab89a66fb45e295eff", - "0x01dc81417e36e527776bf37a3f9d74a4cf01a7fb8e1f407f6bd525743865791d", - "0xa6318e8a57bec438245a6834f44eb9b7fb77def1554d137ea12320fc572f42c9", - "0xd25db9df4575b595130b6159a2e8040d3879c1d877743d960bf9aa88363fbf9f", - "0x61bb8baeb2b92a4f47bb2c8569a1c68df31b3469e634d5e74221bc7065f07a96", - "0xb18962aee4db140c237c24fec7fd073b400b2e56b0d503f8bc74a9114bf183bf", - "0x205473cc0cdab4c8d0c6aeceda9262c225b9db2b7033babfe48b7e919751a2c6", - "0xc5aa7df7552e5bb17a08497b82d8b119f93463ccb67282960aee306e0787f228", - "0x36da99e7d38ce6d7eab90ea109ba26615ad75233f65b3ae5056fba79c0c6682a", - "0xd68b71bba6266b68aec0df39b7c2311e54d46a3eab35f07a9fe60d70f52eec58", - "0xbbe56f1274ada484277add5cb8c90ef687d0b69a4c95da29e32730d90a2d059f", - "0x0982d1d1c15a560339d9151dae5c05e995647624261022bbedce5dce8a220a31", - "0x8ef54ad546d2c6144fc26e1e2ef92919c676d7a76cfdfb5c6a64f09a54e82e71", - "0x1e3ac0133eef9cdbeb590f14685ce86180d02b0eea3ef600fd515c38992b1f26", - "0x642e6b1c4bec3d4ba0ff2f15fbd69dcb57e4ba8785582e1bc2b452f0c139b590", - "0xca713c8cf4afa9c5d0c2db4fc684a8a233b3b01c219b577f0a053548bedf8201", - "0xd0569ba4e1f6c02c69018b9877d6a409659cb5e0aa086df107c2cc57aaba62da", - "0x4ebe68755e14b74973e7f0fa374b87cee9c370439318f5783c734f00bb13e4b5", - "0x788b5292dc5295ae4d0ea0be345034af97a61eec206fda885bbc0f049678c574", - "0x0ebd88acd4ae195d1d3982038ced5af1b6f32a07349cf7fffbff3ce410c10df2", - "0xc7faf0a49234d149036c151381d38427b74bae9bd1601fc71663e603bc15a690", - "0xc5247bf09ebe9fa4e1013240a1f88c703f25a1437196c71ee02ca3033a61f946", - "0x719f8c68113d9f9118b4281e1f42c16060def3e3eeef15f0a10620e886dc988f", - "0x28da4f8d9051a8b4d6158503402bdb6c49ba2fb1174344f97b569c8f640504e6", - 
"0x96f6773576af69f7888b40b0a15bc18cc9ec8ca5e1bb88a5de58795c6ddf678e", - "0x8d80d188a4e7b85607deccf654a58616b6607a0299dd8c3f1165c453fd33d2e4", - "0x9c08dcc4f914486d33aa24d10b89fd0aabcc635aa2f1715dfb1a18bf4e66692a", - "0x0ff7045b5f6584cc22c140f064dec0692762aa7b9dfa1defc7535e9a76a83e35", - "0x8e2dae66fa93857b39929b8fc531a230a7cfdd2c449f9f52675ab5b5176461d5", - "0xf449017c5d429f9a671d9cc6983aafd0c70dd39b26a142a1d7f0773de091ac41", - "0xed3d4cab2d44fec0d5125a97b3e365a77620db671ecdda1b3c429048e2ebdae6", - "0x836a332a84ee2f4f5bf24697df79ed4680b4f3a9d87c50665f46edaeed309144", - "0x7a79278754a4788e5c1cf3b9145edb55a2ba0428ac1c867912b5406bb7c4ce96", - "0x51e6e2ba81958328b38fd0f052208178cec82a9c9abd403311234e93aff7fa70", - "0x217ec3ec7021599e4f34410d2c14a8552fff0bc8f6894ebb52ec79bf6ec80dc9", - "0x8a95bf197d8e359edabab1a77f5a6d04851263352aa46830f287d4e0564f0be0", - "0x60d0cbfb87340b7c92831872b48997ce715da91c576296df215070c6c20046d4", - "0x1739fbca476c540d081b3f699a97387b68af5d14be52a0768d5185bc9b26961b", - "0xac277974f945a02d89a0f8275e02de9353e960e319879a4ef137676b537a7240", - "0x959b7640821904ba10efe8561e442fbdf137ccb030aee7472d10095223e320ba", - "0xdba61c8785a64cb332342ab0510126c92a7d61f6a8178c5860d018d3dad571c6", - "0xc191fb6a92eb1f1fb9e7eb2bdecd7ec3b2380dd79c3198b3620ea00968f2bd74", - "0x16ef4e88e182dfc03e17dc9efaa4a9fbf4ff8cb143304a4a7a9c75d306729832", - "0x39080e4124ca577ff2718dfbcb3415a4220c5a7a4108729e0d87bd05adda5970", - "0xa29a740eef233956baff06e5b11c90ed7500d7947bada6da1c6b5d9336fc37b6", - "0x7fda7050e6be2675251d35376bacc895813620d245397ab57812391d503716ee", - "0x401e0bf36af9992deb87efb6a64aaf0a4bc9f5ad7b9241456b3d5cd650418337", - "0x814e70c57410e62593ebc351fdeb91522fe011db310fcf07e54ac3f6fefe6be5", - "0x03c1e52ecbef0d79a4682af142f012dc6b037a51f972a284fc7973b1b2c66dcf", - "0x57b22fb091447c279f8d47bdcc6a801a946ce78339e8cd2665423dfcdd58c671", - "0x53aeb39ab6d7d4375dc4880985233cba6a1be144289e13cf0bd04c203257d51b", - 
"0x795e5d1af4becbca66c8f1a2e751dcc8e15d7055b6fc09d0e053fa026f16f48f", - "0x1cd02dcd183103796f7961add835a7ad0ba636842f412643967c58fe9545bee4", - "0x55fc1550be9abf92cacb630acf58bad11bf734114ebe502978a261cc38a4dd70", - "0x6a044e0ea5c361d3fb2ca1ba795301e7eb63db4e8a0314638f42e358ea9cfc3e", - "0x57d9f15d4db199cbcb7cbd6524c52a1b799d52b0277b5a270d2985fcee1e2acb", - "0x66c78c412e586bd01febc3e4d909cc278134e74d51d6f60e0a55b35df6fb5b09", - "0x1076799e15a49d6b15c2486032f5e0b50f43c11bc076c401e0779d224e33f6fc", - "0x5f70e3a2714d8b4483cf3155865ba792197e957f5b3a6234e4c408bf2e55119d", - "0x9b105b0f89a05eb1ff7caed74cf9573dc55ac8bc4881529487b3700f5842de16", - "0x1753571b3cfadca4277c59aee89f607d1b1e3a6aa515d9051bafb2f0d8ce0daa", - "0x4014fff940b0950706926a19906a370ccbd652836dab678c82c539c00989201a", - "0x0423fa59ee58035a0beb9653841036101b2d5903ddeabddabf697dbc6f168e61", - "0x78f6781673d991f9138aa1f5142214232d6e3d6986acb6cc7fb000e1a055f425", - "0x21b8a1f6733b5762499bf2de90c9ef06af1c6c8b3ddb3a04cce949caad723197", - "0x83847957e909153312b5bd9a1a37db0bd6c72a417024a69df3e18512973a18b4", - "0x948addf423afd0c813647cfe32725bc55773167d5065539e6a3b50e6ebbdab38", - "0x0b0485d1bec07504a2e5e3a89addd6f25d497cd37a0c04bc38355f8bdb01cd48", - "0x31be8bda5143d39ea2655e9eca6a294791ca7854a829904d8574bedc5057ddc4", - "0x16a0d2d657fadce0d81264320e42e504f4d39b931dff9888f861f3cc78753f99", - "0xb43786061420c5231bf1ff638cb210f89bf4cd2d3e8bafbf34f497c9a298a13b", - "0x1f5986cbd7107d2a3cbc1826ec6908d976addbf9ae78f647c1d159cd5397e1bd", - "0xa883ccdbfd91fad436be7a4e2e74b7796c0aadfe03b7eea036d492eaf74a1a6f", - "0x5bc9eb77bbbf589db48bca436360d5fc1d74b9195237f11946349951f2a9f7f6", - "0xb6bc86de74a887a5dceb012d58c62399897141cbcc51bad9cb882f53991f499c", - "0xa6c3260e7c2dd13f26cf22bf4cd667688142ff7a3511ec895bc8f92ebfa694b6", - "0xb97da27e17d26608ef3607d83634d6e55736af10cc7e4744940a3e35d926c2ad", - "0x9df44067c2dc947c2f8e07ecc90ba54db11eac891569061a8a8821f8f9773694", - 
"0x865cc98e373800825e2b5ead6c21ac9112ff25a0dc2ab0ed61b16dc30a4a7cd7", - "0xe06a5b157570c5e010a52f332cacd4e131b7aed9555a5f4b5a1c9c4606caca75", - "0x824eccb5cf079b5943c4d17771d7f77555a964a106245607cedac33b7a14922e", - "0xe86f721d7a3b52524057862547fc72de58d88728868f395887057153bccaa566", - "0x3344e76d79f019459188344fb1744c93565c7a35799621d7f4505f5b6119ac82", - "0x401b3589bdd1b0407854565329e3f22251657912e27e1fb2d978bf41c435c3ac", - "0xb12fd0b2567eb14a562e710a6e46eef5e280187bf1411f5573bb86ecbe05e328", - "0xe6dc27bab027cbd9fbb5d80054a3f25b576bd0b4902527a0fc6d0de0e45a3f9f", - "0x1de222f0e731001c60518fc8d2be7d7a48cc84e0570f03516c70975fdf7dc882", - "0xb8ff6563e719fc182e15bbe678cf045696711244aacc7ce4833c72d2d108b1b9", - "0x53e28ac2df219bcbbc9b90272e623d3f6ca3221e57113023064426eff0e2f4f2", - "0x8a4e0776f03819e1f35b3325f20f793d026ccae9a769d6e0f987466e00bd1ce7", - "0x2f65f20089a31f79c2c0ce668991f4440b576ecf05776c1f6abea5e9b14b570f", - "0x448e124079a48f62d0d79b96d5ed1ffb86610561b10d5c4236280b01f8f1f406", - "0x419b34eca1440c847f7bff9e948c9913075d8e13c270e67f64380a3f31de9bb2", - "0x2f6e4fee667acaa81ba8e51172b8329ed936d57e9756fb31f635632dbc2709b7", - "0xdd5afc79e8540fcee6a896c43887bd59c9de5d61b3d1b86539faeb41a14b251d", - "0xc707bed926a46cc451a6b05e642b6098368dbdbf14528c4c28733d5d005af516", - "0x153e850b606eb8a05eacecc04db4b560d007305e664bbfe01595cb69d26b8597", - "0x1b91cc07570c812bb329d025e85ef520132981337d7ffc3d84003f81a90bf7a7", - "0x4ca32e77a12951a95356ca348639ebc451170280d979e91b13316844f65ed42a", - "0xe49ea1998e360bd68771bd69c3cd4cf406b41ccca4386378bec66ea210c40084", - "0x01aaffbde1a672d253e0e317603c2dc1d0f752100d9e853f840bca96e57f314c", - "0x170d0befcbbaafb317c8684213a4989368332f66e889824cc4becf148f808146", - "0x56f973308edf5732a60aa3e7899ae1162c7a2c7b528c3315237e20f9125b34e0", - "0x66c54fd5f6d480cab0640e9f3ec1a4eafbafc0501528f57bb0d5c78fd03068ef", - "0xaca6c83f665c64d76fbc4858da9f264ead3b6ecdc3d7437bb800ef7240abffb9", - 
"0xf1d4e02e7c85a92d634d16b12dc99e1d6ec9eae3d8dfbca77e7c609e226d0ce7", - "0x094352545250e843ced1d3c6c7957e78c7d8ff80c470974778930adbe9a4ed1a", - "0x76efa93070d78b73e12eb1efa7f36d49e7944ddcc3a043b916466ee83dca52ce", - "0x1772a2970588ddb584eadf02178cdb52a98ab6ea8a4036d29e59f179d7ba0543", - "0xe4bbf2d97d65331ac9f680f864208a9074d1def3c2433458c808427e0d1d3167", - "0x8ccfb5252b22c77ea631e03d491ea76eb9b74bc02072c3749f3e9d63323b44df", - "0x9e212a9bdf4e7ac0730a0cecd0f6cc49afc7e3eca7a15d0f5f5a68f72e45363b", - "0x52e548ea6445aae3f75509782a7ab1f4f02c2a85cdd0dc928370f8c76ae8802d", - "0xb62e7d73bf76c07e1a6f822a8544b78c96a6ba4f5c9b792546d94b56ca12c8b9", - "0x595cb0e985bae9c59af151bc748a50923921a195bbec226a02157f3b2e066f5b", - "0x1c7aa6b36f402cec990bafefbdbb845fc6c185c7e08b6114a71dd388fe236d32", - "0x01ee2ff1a1e88858934a420258e9478585b059c587024e5ec0a77944821f798c", - "0x420a963a139637bffa43cb007360b9f7d305ee46b6a694b0db91db09618fc2e5", - "0x5a8e2ad20f8da35f7c885e9af93e50009929357f1f4b38a6c3073e8f58fae49e", - "0x52a405fdd84c9dd01d1da5e9d1c4ba95cb261b53bf714c651767ffa2f9e9ad81", - "0xa1a334c901a6d5adc8bac20b7df025e906f7c4cfc0996bfe2c62144691c21990", - "0xb789a00252f0b34bded3cb14ae969effcf3eb29d97b05a578c3be8a9e479c213", - "0xb9dbf7e9ddb638a515da245845bea53d07becdf3f8d1ec17de11d495624c8eab", - "0xaf566b41f5ed0c026fa8bc709533d3fa7a5c5d69b03c39971f32e14ab523fa3d", - "0x8121e0b2d9b106bb2aefd364fd6a450d88b88ee1f5e4aad7c0fcd8508653a112", - "0x8581c1be74279216b93e0a0d7272f4d6385f6f68be3eef3758d5f68b62ee7b6c", - "0x85386f009278f9a1f828404fa1bbfa02dfb9d896554f0a52678eb6ec8feadc55", - "0xf483ed167d92a0035ac65a1cfdb7906e4952f74ae3a1d86324d21f241daffcb7", - "0x3872485e2a520a350884accd990a1860e789dd0d0664ad14f50186a92c7be7be", - "0xc6c1a3301933019105f5650cabcb22bfbf221965ffcfc1329315b24ea3d77fd4", - "0xcee901330a60d212a867805ce0c28f53c6cc718f52156c9e74390d18f5df6280", - "0xa67ae793b1cd1a828a607bae418755c84dbb61adf00833d4c61a94665363284f", - 
"0x80d8159873b517aa6815ccd7c8ed7cfb74f84298d703a6c5a2f9d7d4d984ddde", - "0x1de5a8b915f2d9b45c97a8e134871e2effb576d05f4922b577ade8e3cd747a79", - "0x6ea17c5ece9b97dddb8b2101b923941a91e4b35e33d536ab4ff15b647579e1f5", - "0xcb78631e09bc1d79908ce1d3e0b6768c54b272a1a5f8b3b52485f98d6bba9245", - "0xd7c38f9d3ffdc626fe996218c008f5c69498a8a899c7fd1d63fbb03e1d2a073f", - "0x72cdef54267088d466244a92e4e6f10742ae5e6f7f6a615eef0da049a82068f9", - "0x60b3c490ba8c502656f9c0ed37c47283e74fe1bc7f0e9f651cbc76552a0d88eb", - "0x56bd0c66987a6f3761d677097be9440ea192c1cb0f5ec38f42789abe347e0ea9", - "0x3caac3e480f62320028f6f938ee147b4c78e88a183c464a0c9fb0df937ae30c1", - "0x7a4d2f11bddda1281aba5a160df4b814d23aef07669affe421a861fac2b4ec0f", - "0x9bb4d11299922dc309a4523959298a666ebe4063a9ee3bad1b93988ed59fb933", - "0x957323fffbaf8f938354662452115ae5acba1290f0d3f7b2a671f0359c109292", - "0x877624e31497d32e83559e67057c7a605fb888ed8e31ba68e89e02220eac7096", - "0x8456546ae97470ff6ea98daf8ae632e59b309bd3ff8e9211f7d21728620ed1e5", - "0xbacb26f574a00f466ce354e846718ffe3f3a64897d14d5ffb01afcf22f95e72b", - "0x0228743a6e543004c6617bf2c9a7eba1f92ebd0072fb0383cb2700c3aed38ba0", - "0x04f093f0f93c594549436860058371fb44e8daf78d6e5f563ba63a46b61ddbf0", - "0x0ba17c1ec93429ceaff08eb81195c9844821b64f2b5363926c2a6662f83fb930", - "0xd71605d8446878c677f146837090797e888416cfc9dc4e79ab11776cc6639d3f", - "0x33dde958dc5a6796138c453224d4d6e7f2ae740cceef3b52a8b669eb4b9691a1", - "0x3c39838295d1495e90e61ce59f6fcc693b31c292d02d31759719df6fe3214559", - "0x8aecc66f38644296cf0e6693863d57a243a31a4929130e22ab44cb6157b1af41", - "0xdf7153a7eab9521f2b37124067166c72de8f342249ac0e0f5350bd32f1251053", - "0xa498840b58897cf3bed3981b94c86d85536dfebbc437d276031ebd9352e171eb", - "0xb1df15a081042ab665458223a0449ffc71a10f85f3d977beb20380958fd92262", - "0x15d3bdbdee2a61b01d7a6b72a5482f6714358eedf4bece7bb8458e100caf8fba", - "0x0c96b7a0ea09c3ef758424ffb93654ce1520571e32e1f83aecbeded2388c3a7a", - 
"0xb4a3a8023266d141ecd7c8a7ca5282a825410b263bc11c7d6cab0587c9b5446e", - "0xf38f535969d9592416d8329932b3a571c6eacf1763de10fb7b309d3078b9b8d4", - "0x5a1e7b1c3b3943158341ce6d7f9f74ae481975250d89ae4d69b2fcd4c092eb4e", - "0xdad31e707d352f6cca78840f402f2ac9292094b51f55048abf0d2badfeff5463", - "0x097e290170068e014ceda3dd47b28ede57ff7f916940294a13c9d4aa2dc98aad", - "0x22e2dcedb6bb7f8ace1e43facaa502daa7513e523be98daf82163d2a76a1e0be", - "0x7ef2b211ab710137e3e8c78b72744bf9de81c2adde007aef6e9ce92a05e7a2c5", - "0x49b427805fc5186f31fdd1df9d4c3f51962ab74e15229e813072ec481c18c717", - "0xe60f6caa09fa803d97613d58762e4ff7f22f47d5c30b9d0116cdc6a357de4464", - "0xab3507b37ee92f026c72cc1559331630bc1c7335b374e4418d0d02687df1a9dd", - "0x50825ae74319c9adebc8909ed7fc461702db8230c59975e8add09ad5e7a647ab", - "0x0ee8e9c1d8a527a42fb8c2c8e9e51faf727cffc23ee22b5a95828f2790e87a29", - "0x675c21c290ddb40bec0302f36fbcd2d1832717a4bc05d113c6118a62bc8f9aca", - "0x580bafab24f673317b533148d7226d485e211eaa3d6e2be2529a83ca842b58a7", - "0x540e474776cae597af24c147dc1ae0f70a6233e98cf5c3ce31f38b830b75c99a", - "0x36eaf9f286e0f356eaaf8d81f71cc52c81d9ebc838c3b4859009f8567a224d16", - "0x0e2cbbb40954be047d02b1450a3dbd2350506448425dc25fd5faf3a66ee8f5c4", - "0x7eb0390cfe4c4eb120bbe693e87adc8ecab51d5fd8ce8f911c8ff07fad8cbe20", - "0xbf77589f5c2ebb465b8d7936f6260a18a243f59bd87390ee22cf579f6f020285", - "0x695b96bb28693f6928777591ef64146466d27521280a295936a52ec60707c565", - "0x22a0d018cbd4274caa8b9e7fb132e0a7ed787874046ca683a7d81d1c7c8b8f15", - "0x84092b122bb35e5ad85407b4b55f33707b86e0238c7970a8583f3c44308ed1d9", - "0xea346067ca67255235f9cae949f06e4b6c93846a7abc7c8c8cd786e9c4b3e4bc", - "0xa6df0716b125dc696b5d0e520cb49c1c089397c754efc146792e95bc58cc7159", - "0x7377b5d3953029fc597fb10bb6479ee34133d38f08783fbb61c7d070f34ea66f", - "0x7d79b00ffb976a10cd24476a394c8ed22f93837c51a58a3ddc7418153a5a8ea1", - "0x01e55182e80dff26cc3e06bb736b4a63745bde8ae28c604fa7fb97d99de5f416", - 
"0x062a2d5a207f8d540764d09648afecbf5033b13aec239f722b9033a762acf18b", - "0x48be60a3221d98b4d62f0b89d3bef74c70878dd65c6f79b34c2c36d0ddaa1da0", - "0x41e11f33543cf045c1a99419379ea31523d153bdf664549286b16207b9648c85", - "0xeef4d30b4700813414763a199e7cc6ab0faec65ef8b514faa01c6aa520c76334", - "0xea7cfe990422663417715e7859fc935ca47f47c943a1254044b6bc5934c94bc8", - "0xbbd3c834e5403b98a0ca346c915a23310f3d58880786628bc6cfbe05ba29c3c5", - "0xe216379f385bc9995ae0f37f1409a78d475c56b8aeb4ee434326724ec20124f7", - "0xdd328a1eee19d09b6fef06e252f8ad0ae328fbf900ef745f5950896803a3899d", - "0xa16fde34b0d743919feb0781eca0c525a499d279119af823cb3a8817000335db", - "0x7a28d108c59b83b12c85cd9aabc1d1d994a9a0329ae7b64a32aadcd61ebe50e3", - "0xb28bc82fceae74312eb837a805f0a8a01c0f669b99bb03fde31c4d58bedff89b", - "0x1b0d8f37d349781e846900b51a90c828aa384afe9b8ee1f88aeb8dba4b3168f2", - "0xbfd0301ff964c286c3331a30e09e0916da6f484e9c9596dbf1cae3cc902dbf9e", - "0xbb8254cb9ef6b485b8fb6caeafe45f920affc30f6b9d671e9a454530536f4fef", - "0xcad2317cf63dfa7147ded5c7e15f5f72e78f42d635e638f1ece6bc722ca3638b", - "0xb6c6e856fd45117f54775142f2b38f31114539d8943bcbcf823f6c7650c001e4", - "0x869f1baa35684c8f67a5bc99b294187852e6c85243a2f36481d0891d8b043020", - "0x14c6ccf145ee40ff56e3810058d2fba9a943ffc7c7087c48a08b2451c13dc788", - "0x263c1bcb712890f155b7e256cefa4abf92fe4380f3ffc11c627d5e4e30864d18", - "0x69f4eaf655e31ad7f7a725cd415ce7e45dd4a8396ac416950d42ed33155c3487", - "0x47e8eec2c5e33c9a54fe1f9b09e7744b614fb16531c36b862aa899424be13b05", - "0x5c985de270e62c44f0b49157882e8e83641b906ce47959e337fe8423e125a2eb", - "0x4e13b11e13202439bb5de5eea3bb75d2d7bf90f91411163ade06161a9cf424db", - "0x583a8fa159bb74fa175d72f4e1705e9a3b8ffe26ec5ad6e720444b99288f1213", - "0x903d2a746a98dfe2ee2632606d57a9b0fa6d8ccd895bb18c2245fd91f8a43676", - "0xa35a51330316012d81ec7249e3f2b0c9d7fcbb99dd98c62fe880d0a152587f51", - "0x33818a7beb91730c7b359b5e23f68a27b429967ea646d1ea99c314353f644218", - 
"0x183650af1e0b67f0e7acb59f8c72cc0e60acc13896184db2a3e4613f65b70a8b", - "0x857ff2974bef960e520937481c2047938a718cea0b709282ed4c2b0dbe2ef8fa", - "0x95a367ecb9a401e98a4f66f964fb0ece783da86536410a2082c5dbb3fc865799", - "0x56c606a736ac8268aedadd330d2681e7c7919af0fe855f6c1c3d5c837aa92338", - "0x5c97f7abf30c6d0d4c23e762c026b94a6052a444df4ed942e91975419f68a3a4", - "0x0b571de27d2022158a3128ae44d23a8136e7dd2dee74421aa4d6ed15ee1090a0", - "0xa17f6bc934a2f3c33cea594fee8c96c1290feec934316ebbbd9efab4937bf9f9", - "0x9ff57d70f27aad7281841e76435285fd27f10dad256b3f5cabde4ddc51b70eff", - "0xafa3071a847215b3ccdf51954aa7cb3dd2e6e2a39800042fc42009da705508b2", - "0x5e3bea33e4ac6f7c50a077d19571b1796e403549b1ce7b15e09905a0cc5a4acf", - "0x0dc7ba994e632ab95f3ecb7848312798810cf761d1c776181882d17fd6dda075", - "0xb4f7158679dad9f7370a2f64fbe617a40092849d17453b4f50a93ca8c6885844", - "0x094564b00f53c6f27c121fd8adfe1685b258b259e585a67b57c85efb804c57b2", - "0x9cd21a4249ba3fccffad550cdb8409dc12d8b74a7192874b6bafe2363886f318", - "0xbb22e0dad55cb315c564c038686419d40ef7f13af2143a28455bf445f6e10393", - "0x2a71d5e00821178c2cd39e7501e07da5cca6680eb7cdbe996f52dccafadb3735", - "0x9619406093b121e044a5b403bb1713ae160aeb52ad441f82dc6c63e4b323b969", - "0x3b8bd1d82c6d67ae707e19b889f1cb1f7bba912f12ae4284298f3a70c3644c79", - "0xd7a70c50d47d48785b299dbea01bf03ef18b8495de3c35cb265bc8f3295c4e15", - "0x8802ecce8dd6b6190af8ac79aafda3479c29f548d65e5798c0ca51a529b19108", - "0x4b630e1df52ec5fd650f4a4e76b3eeddda39e1e9eab996f6d3f02eefdf690990", - "0x0bfbff60fcf7f411d469f7f6f0a58ca305fd84eb529ee3ac73c00174793d723e", - "0x535f78b5f3a99a1c498e2c19dc1acb0fbbaba8972ba1d7d66936c28ab3667ebe", - "0x06ba92d8129db98fec1b75f9489a394022854f22f2e9b9450b187a6fc0d94a86", - "0xb7ae275ba10f80fb618a2cf949d5ad2e3ae24eb2eb37dcf1ec8c8b148d3ba27f", - "0xb275579bcf2584d9794dd3fc7f999902b13d33a9095e1980d506678e9c263de1", - "0x843ccd52a81e33d03ad2702b4ef68f07ca0419d4495df848bff16d4965689e48", - 
"0xde8b779ca7250f0eb867d5abdffd1d28c72a5a884d794383fc93ca40e5bf6276", - "0x6b789a2befccb8788941c9b006e496b7f1b03dbb8e530ba339db0247a78a2850", - "0xfccd4dca80bc52f9418f26b0528690255e320055327a34b50caf088235d2f660", - "0x18479ebfbe86c1e94cd05c70cb6cace6443bd9fdac7e01e9c9535a9e85141f2f", - "0x5350c8f3296441db954a261238c88a3a0c51ab418a234d566985f2809e211148", - "0xa5636614135361d03a381ba9f6168e2fd0bd2c1105f9b4e347c414df8759dea3", - "0xe7bb69e600992e6bd41c88a714f50f450153f1a05d0ddb4213a3fc4ba1f48c3f", - "0x17b42e81bae19591e22aa2510be06803bcb5c39946c928c977d78f346d3ca86b", - "0x30a10c07dc9646b7cbb3e1ab722a94d2c53e04c0c19efaaea7dccba1b00f2a20", - ], - compressed_lamport_pk: - "0x672ba456d0257fe01910d3a799c068550e84881c8d441f8f5f833cbd6c1a9356", - child_sk: - "7419543105316279183937430842449358701327973165530407166294956473095303972104" + seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", + master_sk: + "6083874454709270928345386274498605044986640685124978867557563392430687146096", + child_index: 0, + lamport_0: vec![ + "0xe345d0ad7be270737de05cf036f688f385d5f99c7fddb054837658bdd2ebd519", + "0x65050bd4db9c77c051f67dcc801bf1cdf33d81131e608505bb3e4523868eb76c", + "0xc4f8e8d251fbdaed41bdd9c135b9ed5f83a614f49c38fffad67775a16575645a", + "0x638ad0feace7567255120a4165a687829ca97e0205108b8b73a204fba6a66faa", + "0xb29f95f64d0fcd0f45f265f15ff7209106ab5f5ce6a566eaa5b4a6f733139936", + "0xbcfbdd744c391229f340f02c4f2d092b28fe9f1201d4253b9045838dd341a6bf", + "0x8b9cf3531bfcf0e4acbfd4d7b4ed614fa2be7f81e9f4eaef53bedb509d0b186f", + "0xb32fcc5c4e2a95fb674fa629f3e2e7d85335f6a4eafe7f0e6bb83246a7eced5f", + "0xb4fe80f7ac23065e30c3398623b2761ac443902616e67ce55649aaa685d769ce", + "0xb99354f04cfe5f393193c699b8a93e5e11e6be40ec16f04c739d9b58c1f55bf3", + "0x93963f58802099ededb7843219efc66a097fab997c1501f8c7491991c780f169", + "0x430f3b027dbe9bd6136c0f0524a0848dad67b253a11a0e4301b44074ebf82894", + 
"0xd635c39b4a40ad8a54d9d49fc8111bd9d11fb65c3b30d8d3eaef7d7556aac805", + "0x1f7253a6474cf0b2c05b02a7e91269137acddedcb548144821f9a90b10eccbab", + "0x6e3bdb270b00e7b6eb8b044dbfae07b51ea7806e0d24218c59a807a7fd099c18", + "0x895488ad2169d8eaae332ce5b0fe1e60ffab70e62e1cb15a2a1487544af0a6e8", + "0x32d45a99d458c90e173a3087ea3661ab62d429b285089e92806a9663ba825342", + "0xc15c52106c3177f5848a173076a20d46600ca65958a1e3c7d45a593aaa9670ed", + "0xd8180c550fbe4cd6d5b676ff75e0728729d8e28a3b521d56152594ac6959d563", + "0x58fe153fac8f4213aaf175e458435e06304548024bcb845844212c774bdffb2a", + "0x10fff610a50f4bee5c978f512efa6ab4fafacb65929606951ba5b93eeb617b5a", + "0x78ac9819799b52eba329f13dd52cf0f6148a80bf04f93341814c4b47bb4aa5ec", + "0xa5c3339caa433fc11e74d1765bec577a13b054381a44b23c2482e750696876a9", + "0x9f716640ab5cdc2a5eb016235cddca2dc41fa4ec5acd7e58af628dade99ec376", + "0x2544364320e67577c4fed8c7c7c839deed93c24076d5343c5b8faca4cc6dc2d8", + "0x62553e782541f822c589796be5d5c83bfc814819100b2be0710b246f5aa7149c", + "0x229fb761c46c04b22ba5479f2696be0f936fded68d54dd74bcd736b8ba512afb", + "0x0af23996a65b98a0ebaf19f3ec0b3ef20177d1bfd6eb958b3bd36e0bdbe04c8c", + "0x6f0954f9deab52fd4c8d2daba69f73a80dea143dd49d9705c98db3d653adf98c", + "0xfa9221dd8823919a95b35196c1faeb59713735827f3e84298c25c83ac700c480", + "0x70c428e3ff9e5e3cda92d6bb85018fb89475c19f526461cca7cda64ebb2ff544", + "0xdcaac3413e22314f0f402f8058a719b62966b3a7429f890d947be952f2e314ba", + "0xb6b383cb5ec25afa701234824491916bfe6b09d28cf88185637e2367f0cf6edc", + "0x7b0d91488fc916aba3e9cb61a5a5645b9def3b02e4884603542f679f602afb8d", + "0xe9c20abca284acfde70c59584b9852b85c52fa7c263bb981389ff8d638429cd7", + "0x838524f798daee6507652877feb9597f5c47e9bb5f9aa52a35fb6fff796813b9", + "0xbe1ca18faf9bf322474fad1b3d9b4f1bc76ae9076e38e6dd2b16e2faf487742b", + "0xbf02d70f1a8519343a16d24bade7f7222912fd57fe4f739f367dfd99d0337e8e", + "0xc979eb67c107ff7ab257d1c0f4871adf327a4f2a69e01c42828ea27407caf058", + 
"0xf769123d3a3f19eb7b5c3fd4f467a042944a7c5ff8834cebe427f47dbd71460c", + "0xaefc8edc23257e1168a35999fe3832bcbc25053888cc89c38667482d6748095b", + "0x8ff399f364d3a2428b1c92213e4fdc5341e7998007da46a5a2f671929b42aaab", + "0xcf2a3d9e6963b24c5001fbba1e5ae7f45dd6cf520fd24861f745552db86bab48", + "0xb380e272d7f3091e5c887fa2e7c690c67d59f4d95f8376d150e555da8c738559", + "0xc006a749b091d91204dbb64f59059d284899de5986a7f84f8877afd5e0e4c253", + "0x818d8bb9b7da2dafa2ef059f91975e7b6257f5e199d217320de0a576f020de5c", + "0x7aabf4a1297d2e550a2ee20acb44c1033569e51b6ec09d95b22a8d131e30fd32", + "0xdd01c80964a5d682418a616fb10810647c9425d150df643c8ddbbe1bfb2768b7", + "0x1e2354e1d97d1b06eb6cfe9b3e611e8d75b5c57a444523e28a8f72a767eff115", + "0x989c9a649dca0580256113e49ea0dd232bbfd312f68c272fe7c878acc5da7a2c", + "0x14ee1efe512826fff9c028f8c7c86708b841f9dbf47ce4598298b01134ebdc1a", + "0x6f861dba4503f85762d9741fa8b652ce441373f0ef2b7ebbd5a794e48cdab51b", + "0xda110c9492ffdb87efe790214b7c9f707655a5ec08e5af19fb2ab2acc428e7dc", + "0x5576aa898f6448d16e40473fcb24c46c609a3fc46a404559faa2d0d34d7d49ce", + "0x9bd9a35675f2857792bc45893655bfdf905ffeaee942d93ad39fbcadd4ca9e11", + "0xfa95e4c37db9303d5213890fd984034089cbc9c6d754741625da0aa59cc45ccf", + "0xfef7d2079713f17b47239b76c8681bf7f800b1bfeac7a53265147579572ddf29", + "0x39aa7c0fecf9a1ed037c685144745fda16da36f6d2004844cf0e2d608ef6ed0e", + "0x5530654d502d6ba30f2b16f49cc5818279697308778fd8d40db8e84938144fb6", + "0xb1beaa36397ba1521d7bf7df16536969d8a716e63510b1b82a715940180eb29f", + "0x21abe342789f7c15a137afa373f686330c0db8c861572935a3cd8dcf9e4e1d45", + "0x27b5a1acda55b4e0658887bd884d3203696fcae0e94f19e31bfe931342b1c257", + "0x58401a02502d7708a812c0c72725f768f5a556480517258069f2d72543cda888", + "0x4b38f291548f51bee7e4cf8cc5c8aa8f4ad3ec2461dba4ccbab70f1c1bfd7feb", + "0x9b39a53fdafaaf1d23378e0aa8ae65d38480de69821de2910873eefc9f508568", + "0x932200566a3563ee9141913d12fd1812cb008cb735724e8610890e101ec10112", + 
"0x6a72f70b4ec5491f04780b17c4776a335fcc5bff5073d775150e08521dc74c91", + "0x86d5c60e627a4b7d5d075b0ba33e779c45f3f46d22ed51f31360afd140851b67", + "0x5ca2a736bb642abc4104faa781c9aff13d692a400d91dc961aec073889836946", + "0xa14bca5a262ac46ceac21388a763561fc85fb9db343148d786826930f3e510cd", + "0x87be03a87a9211504aa70ec149634ee1b97f7732c96377a3c04e98643dcba915", + "0x8fe283bc19a377823377e9c326374ebb3f29527c12ea77bfb809c18eef8943b0", + "0x8f519078b39a3969f7e4caeca9839d4e0eccc883b89e4a86d0e1731bfc5e33fc", + "0x33d7c28c3d26fdfc015a8c2131920e1392ef0aea55505637b54ea63069c7858e", + "0xe57de7c189fcc9170320c7acedb38798562a48dbc9943b2a8cd3441d58431128", + "0x513dac46017050f82751a07b6c890f14ec43cadf687f7d202d2369e35b1836b4", + "0xfd967d9f805bb7e78f7b7caa7692fdd3d6b5109c41ad239a08ad0a38eeb0ac4c", + "0xf2013e4da9abcc0f03ca505ed94ec097556dbfd659088cd24ec223e02ac43329", + "0xe0dcfac50633f7417f36231df2c81fa1203d358d5f57e896e1ab4b512196556b", + "0xf022848130e73fe556490754ef0ecfcdaaf3b9ff16ae1eda7d38c95c4f159ded", + "0x2147163a3339591ec7831d2412fb2d0588c38da3cd074fa2a4d3e5d21f9f1d2d", + "0x11ee2404731962bf3238dca0d9759e06d1a5851308b4e6321090886ec5190b69", + "0xf7679ecd07143f8ac166b66790fa09aed39352c09c0b4766bbe500b1ebace5a5", + "0xc7a0e95f09076472e101813a95e6ea463c35bd5ee9cfda3e5d5dbccb35888ef0", + "0xde625d3b547eb71bea5325a0191a592fa92a72e4b718a499fdba32e245ddf37e", + "0x7e5bdccd95df216e8c59665073249072cb3c9d0aef6b341afc0ca90456942639", + "0xc27f65fd9f797ede374e06b4ddb6e8aa59c7d6f36301f18b42c48b1889552fe3", + "0x8175730a52ea571677b035f8e2482239dda1cfbff6bc5cde00603963511a81af", + "0x09e440f2612dad1259012983dc6a1e24a73581feb1bd69d8a356eea16ba5fd0e", + "0x59dcc81d594cbe735a495e38953e8133f8b3825fd84767af9e4ea06c49dbabfa", + "0x6c8480b59a1a958c434b9680edea73b1207077fb9a8a19ea5f9fbbf6f47c4124", + "0x81f5c89601893b7a5a231a7d37d6ab9aa4c57f174fcfc6b40002fa808714c3a1", + "0x41ba4d6b4da141fcc1ee0f4b47a209cfd143d34e74fc7016e9956cedeb2db329", + 
"0x5e0b5b404c60e9892040feacfb4a84a09c2bc4a8a5f54f3dad5dca4acdc899dc", + "0xe922eebf1f5f15000d8967d16862ed274390cde808c75137d2fb9c2c0a80e391", + "0xbf49d31a59a20484f0c08990b2345dfa954509aa1f8901566ab9da052b826745", + "0xb84e07da828ae668c95d6aa31d4087504c372dbf4b5f8a8e4ded1bcf279fd52b", + "0x89288bf52d8c4a9561421ad199204d794038c5d19ae9fee765ee2b5470e68e7e", + "0xf6f618be99b85ec9a80b728454a417c647842215e2160c6fe547dd5a69bd9302", + "0xdd9adc002f98c9a47c7b704fc0ce0a5c7861a5e2795b6014749cde8bcb8a034b", + "0xd119a4b2c0db41fe01119115bcc35c4b7dbfdb42ad3cf2cc3f01c83732acb561", + "0x9c66bc84d416b9193bad9349d8c665a9a06b835f82dc93ae0cccc218f808aad0", + "0xd4b50eefcd2b5df075f14716cf6f2d26dfc8ae02e3993d711f4a287313038fde", + "0xaf72bfb346c2f336b8bc100bff4ba35d006a3dad1c5952a0adb40789447f2704", + "0xc43ca166f01dc955e7b4330227635feb1b0e0076a9c5633ca5c614a620244e5b", + "0x5efca76970629521cfa053fbbbda8d3679cadc018e2e891043b0f52989cc2603", + "0x35c57de1c788947f187051ce032ad1e899d9887d865266ec6fcfda49a8578b2b", + "0x56d4be8a65b257216eab7e756ee547db5a882b4edcd12a84ed114fbd4f5be1f1", + "0x257e858f8a4c07a41e6987aabaa425747af8b56546f2a3406f60d610bcc1f269", + "0x40bd9ee36d52717ab22f1f6b0ee4fb38b594f58399e0bf680574570f1b4b8c90", + "0xcb6ac01c21fc288c12973427c5df6eb8f6aefe64b92a6420c6388acdf36bc096", + "0xa5716441312151a5f0deb52993a293884c6c8f445054ce1e395c96adeee66c6d", + "0xe15696477f90113a10e04ba8225c28ad338c3b6bdd7bdeb95c0722921115ec85", + "0x8faeaa52ca2f1d791cd6843330d16c75eaf6257e4ba236e3dda2bc1a644aee00", + "0xc847fe595713bf136637ce8b43f9de238762953fed16798878344da909cc76ae", + "0xb5740dc579594dd110078ce430b9696e6a308078022dde2d7cfe0ef7647b904e", + "0x551a06d0771fcd3c53aea15aa8bf700047138ef1aa22265bee7fb965a84c9615", + "0x9a65397a5907d604030508d41477de621ce4a0d79b772e81112d634455e7a4da", + "0x6462d4cc2262d7faf8856812248dc608ae3d197bf2ef410f00c3ae43f2040995", + "0x6782b1bd319568e30d54b324ab9ed8fdeac6515e36b609e428a60785e15fb301", + 
"0x8bcdcf82c7eb2a07e14db20d80d9d2efea8d40320e121923784c92bf38250a8e", + "0x46ed84fa17d226d5895e44685747ab82a97246e97d6237014611aaaba65ed268", + "0x147e87981673326c5a2bdb06f5e90eaaa9583857129451eed6dde0c117fb061f", + "0x4141d6fe070104c29879523ba6669552f3d457c0929bb878d2751f4ff059b895", + "0xd866ce4ef226d74841f950fc28cdf2235db21e0e3f07a0c8f807704464db2210", + "0xa804f9118bf92558f684f90c2bda832a4f51ef771ffb2765cde3ec6f48124f32", + "0xc436d4a65910124e00cded9a637178914a8fbc090400f3f031c03eac4d0295a5", + "0x643fdb9243656512316528de04dcc7344ca33783580ad0c3debf8c4a6e7c8bc4", + "0x7f4a345b41706b281b2de998e91ff62d908eb29fc333ee336221757753c96e23", + "0x6bdc086a5b11de950cabea33b72d98db886b291c4c2f02d3e997edc36785d249", + "0xfb10b5b47d374078c0a52bff7174bf1cd14d872c7d20b4a009e2afd3017a9a17", + "0x1e07e605312db5380afad8f3d7bd602998102fdd39565b618ac177b13a6527e6", + "0xc3161b5a7b93aabf05652088b0e5b4803a18be693f590744c42c24c7aaaeef48", + "0xa47e4f25112a7d276313f153d359bc11268b397933a5d5375d30151766bc689a", + "0xb24260e2eff88716b5bf5cb75ea171ac030f5641a37ea89b3ac45acb30aae519", + "0x2bcacbebc0a7f34406db2c088390b92ee34ae0f2922dedc51f9227b9afb46636", + "0xc78c304f6dbe882c99c5e1354ce6077824cd42ed876db6706654551c7472a564", + "0x6e2ee19d3ee440c78491f4e354a84fa593202e152d623ed899e700728744ac85", + "0x2a3f438c5dc012aa0997b66f661b8c10f4a0cd7aa5b6e5922b1d73020561b27f", + "0xd804f755d93173408988b95e9ea0e9feae10d404a090f73d9ff84df96f081cf7", + "0xe06fda941b6936b8b33f00ffa02c8b05fd78fbec953da61da2043f5644b30a50", + "0x45ee279b465d53148850a16cc7f6bd33e7627aef554a9418ed012ca8f9717f80", + "0x9c79348c1bcd6aa2135452491d73564413a247ea8cc38fa7dcc6c43f8a2d61d5", + "0x7c91e056f89f2a77d3e3642e595bcf4973c3bca68dd2b10f51ca0d8945e4255e", + "0x669f976ebe38cbd22c5b1f785e14b76809d673d2cb1458983dbda41f5adf966b", + "0x8bc71e99ffcc119fd8bd604af54c0663b0325a3203a214810fa2c588089ed5a7", + "0x36b3f1ffeae5d9855e0965eef33f4c5133d99685802ac5ce5e1bb288d308f889", + 
"0x0aad33df38b3f31598e04a42ec22f20bf2e2e9472d02371eb1f8a06434621180", + "0x38c5632b81f90efbc51a729dcae03626a3063aa1f0a102fd0e4326e86a08a732", + "0x6ea721753348ed799c98ffa330d801e6760c882f720125250889f107915e270a", + "0xe700dd57ce8a653ce4269e6b1593a673d04d3de8b79b813354ac7c59d1b99adc", + "0xe9294a24b560d62649ca898088dea35a644d0796906d41673e29e4ea8cd16021", + "0xf20bb60d13a498a0ec01166bf630246c2f3b7481919b92019e2cfccb331f2791", + "0xf639a667209acdd66301c8e8c2385e1189b755f00348d614dc92da14e6866b38", + "0x49041904ee65c412ce2cd66d35570464882f60ac4e3dea40a97dd52ffc7b37a2", + "0xdb36b16d3a1010ad172fc55976d45df7c03b05eab5432a77be41c2f739b361f8", + "0x71400cdd2ea78ac1bf568c25a908e989f6d7e2a3690bc869c7c14e09c255d911", + "0xf0d920b2d8a00b88f78e7894873a189c580747405beef5998912fc9266220d98", + "0x1a2baefbbd41aa9f1cc5b10e0a7325c9798ba87de6a1302cf668a5de17bc926a", + "0x449538a20e52fd61777c45d35ff6c2bcb9d9165c7eb02244d521317f07af6691", + "0x97006755b9050b24c1855a58c4f4d52f01db4633baff4b4ef3d9c44013c5c665", + "0xe441363a27b26d1fff3288222fa8ed540f8ca5d949ddcc5ff8afc634eec05336", + "0xed587aa8752a42657fea1e68bc9616c40c68dcbbd5cb8d781e8574043e29ef28", + "0x47d896133ba81299b8949fbadef1c00313d466827d6b13598685bcbb8776c1d2", + "0x7786bc2cb2d619d07585e2ea4875f15efa22110e166af87b29d22af37b6c047d", + "0x956b76194075fe3daf3ca508a6fad161deb05d0026a652929e37c2317239cbc6", + "0xec9577cb7b85554b2383cc4239d043d14c08d005f0549af0eca6994e203cb4e7", + "0x0722d0c68d38b23b83330b972254bbf9bfcf32104cc6416c2dad67224ac52887", + "0x532b19d54fb6d77d96452d3e562b79bfd65175526cd793f26054c5f6f965df39", + "0x4d62e065e57cbf60f975134a360da29cabdcea7fcfc664cf2014d23c733ab3b4", + "0x09be0ea6b363fd746b303e482cb4e15ef25f8ae57b7143e64cbd5c4a1d069ebe", + "0x69dcddc3e05147860d8d0e90d602ac454b609a82ae7bb960ee2ecd1627d77777", + "0xa5e2ae69d902971000b1855b8066a4227a5be7234ac9513b3c769af79d997df4", + "0xc287d4bc953dcff359d707caf2ccba8cc8312156eca8aafa261fb72412a0ea28", + 
"0xb27584fd151fb30ed338f9cba28cf570f7ca39ebb03eb2e23140423af940bd96", + "0x7e02928194441a5047af89a6b6555fea218f1df78bcdb5f274911b48d847f5f8", + "0x9ba611add61ea6ba0d6d494c0c4edd03df9e6c03cafe10738cee8b7f45ce9476", + "0x62647ec3109ac3db3f3d9ea78516859f0677cdde3ba2f27f00d7fda3a447dd01", + "0xfa93ff6c25bfd9e17d520addf5ed2a60f1930278ff23866216584853f1287ac1", + "0x3b391c2aa79c2a42888102cd99f1d2760b74f772c207a39a8515b6d18e66888a", + "0xcc9ae3c14cbfb40bf01a09bcde913a3ed208e13e4b4edf54549eba2c0c948517", + "0xc2b8bce78dd4e876da04c54a7053ca8b2bedc8c639cee82ee257c754c0bea2b2", + "0xdb186f42871f438dba4d43755c59b81a6788cb3b544c0e1a3e463f6c2b6f7548", + "0xb7f8ba137c7783137c0729de14855e20c2ac4416c33f5cac3b235d05acbab634", + "0x282987e1f47e254e86d62bf681b0803df61340fdc9a8cf625ef2274f67fc6b5a", + "0x04aa195b1aa736bf8875777e0aebf88147346d347613b5ab77bef8d1b502c08c", + "0x3f732c559aee2b1e1117cf1dec4216a070259e4fa573a7dcadfa6aab74aec704", + "0x72699d1351a59aa73fcede3856838953ee90c6aa5ef5f1f7e21c703fc0089083", + "0x6d9ce1b8587e16a02218d5d5bed8e8d7da4ac40e1a8b46eeb412df35755c372c", + "0x4f9c19b411c9a74b8616db1357dc0a7eaf213cb8cd2455a39eb7ae4515e7ff34", + "0x9163dafa55b2b673fa7770b419a8ede4c7122e07919381225c240d1e90d90470", + "0x268ff4507b42e623e423494d3bb0bc5c0917ee24996fb6d0ebedec9ce8cd9d5c", + "0xff6e6169d233171ddc834e572024586eeb5b1bda9cb81e5ad1866dbc53dc75fe", + "0xb379a9c8279205e8753b6a5c865fbbf70eb998f9005cd7cbde1511f81aed5256", + "0x3a6b145e35a592e037c0992c9d259ef3212e17dca81045e446db2f3686380558", + "0x60fb781d7b3137481c601871c1c3631992f4e01d415841b7f5414743dcb4cfd7", + "0x90541b20b0c2ea49bca847e2db9b7bba5ce15b74e1d29194a12780e73686f3dd", + "0xe2b0507c13ab66b4b769ad1a1a86834e385b315da2f716f7a7a8ff35a9e8f98c", + "0xeefe54bc9fa94b921b20e7590979c28a97d8191d1074c7c68a656953e2836a72", + "0x8676e7f59d6f2ebb0edda746fc1589ef55e07feab00d7008a0f2f6f129b7bb3a", + "0x78a3d93181b40152bd5a8d84d0df7f2adde5db7529325c13bc24a5b388aed3c4", + 
"0xcc0e2d0cba7aaa19c874dbf0393d847086a980628f7459e9204fda39fad375c0", + "0x6e46a52cd7745f84048998df1a966736d2ac09a95a1c553016fef6b9ec156575", + "0x204ac2831d2376d4f9c1f5c106760851da968dbfc488dc8a715d1c764c238263", + "0xbdb8cc7b7e5042a947fca6c000c10b9b584e965c3590f92f6af3fe4fb23e1358", + "0x4a55e4b8a138e8508e7b11726f617dcf4155714d4600e7d593fd965657fcbd89", + "0xdfe064bb37f28d97b16d58b575844964205e7606dce914a661f2afa89157c45b", + "0x560e374fc0edda5848eef7ff06471545fcbdd8aefb2ecddd35dfbb4cb03b7ddf", + "0x10a66c82e146da5ec6f48b614080741bc51322a60d208a87090ad7c7bf6b71c6", + "0x62534c7dc682cbf356e6081fc397c0a17221b88508eaeff798d5977f85630d4f", + "0x0138bba8de2331861275356f6302b0e7424bbc74d88d8c534479e17a3494a15b", + "0x580c7768bf151175714b4a6f2685dc5bcfeb088706ee7ed5236604888b84d3e4", + "0xd290adb1a5dfc69da431c1c0c13da3be788363238d7b46bc20185edb45ab9139", + "0x1689879db6c78eb4d3038ed81be1bc106f8cfa70a7c6245bd4be642bfa02ebd7", + "0x6064c384002c8b1594e738954ed4088a0430316738def62822d08b2285514918", + "0x01fd23493f4f1cc3c5ff4e96a9ee386b2a144b50a428a6b5db654072bddadfe7", + "0xd5d05bb7f23ab0fa2b82fb1fb14ac29c2477d81a85423d0a45a4b7d5bfd81619", + "0xd72b9a73ae7b24db03b84e01106cea734d4b9d9850b0b7e9d65d6001d859c772", + "0x156317cb64578db93fee2123749aff58c81eae82b189b0d6f466f91de02b59df", + "0x5fba299f3b2c099edbac18d785be61852225890fc004bf6be0787d62926a79b3", + "0x004154f28f685bdbf0f0d6571e7a962a4c29b6c3ebedaaaf66097dfe8ae5f756", + "0x4b45816f9834c3b289affce7a3dc80056c2b7ffd3e3c250d6dff7f923e7af695", + "0x6ca53bc37816fff82346946d83bef87860626bbee7fd6ee9a4aeb904d893a11f", + "0xf48b2f43184358d66d5b5f7dd2b14a741c7441cc7a33ba3ebcc94a7b0192d496", + "0x3cb98f4baa429250311f93b46e745174f65f901fab4eb8075d380908aaaef650", + "0x343dfc26b4473b3a20e706a8e87e5202a4e6b96b53ed448afb9180c3f766e5f8", + "0x1ace0e8a735073bcbaea001af75b681298ef3b84f1dbab46ea52cee95ab0e7f9", + "0xd239b110dd71460cdbc41ddc99494a7531186c09da2a697d6351c116e667733b", + 
"0x22d6955236bd275969b8a6a30c23932670a6067f68e236d2869b6a8b4b493b83", + "0x53c1c01f8d061ac89187e5815ef924751412e6a6aa4dc8e3abafb1807506b4e0", + "0x2f56dd20c44d7370b713e7d7a1bfb1a800cac33f8a6157f278e17a943806a1f7", + "0xc99773d8a5b3e60115896a65ac1d6c15863317d403ef58b90cb89846f4715a7f", + "0x9f4b6b77c254094621cd336da06fbc6cbb7b8b1d2afa8e537ceca1053c561ef5", + "0x87944d0b210ae0a6c201cba04e293f606c42ebaed8b4a5d1c33f56863ae7e1b5", + "0xa7d116d962d03ca31a455f9cda90f33638fb36d3e3506605aa19ead554487a37", + "0x4042e32e224889efd724899c9edb57a703e63a404129ec99858048fbc12f2ce0", + "0x36759f7a0faeea1cd4cb91e404e4bf09908de6e53739603d5f0db52b664158a3", + "0xa4d50d005fb7b9fea8f86f1c92439cc9b8446efef7333ca03a8f6a35b2d49c38", + "0x80cb7c3e20f619006542edbe71837cdadc12161890a69eea8f41be2ee14c08a3", + "0xbb3c44e1df45f2bb93fb80e7f82cee886c153ab484c0095b1c18df03523629b4", + "0x04cb749e70fac3ac60dea779fceb0730b2ec5b915b0f8cf28a6246cf6da5db29", + "0x4f5189b8f650687e65a962ef3372645432b0c1727563777433ade7fa26f8a728", + "0x322eddddf0898513697599b68987be5f88c0258841affec48eb17cf3f61248e8", + "0x6416be41cda27711d9ec22b3c0ed4364ff6975a24a774179c52ef7e6de9718d6", + "0x0622d31b8c4ac7f2e30448bdadfebd5baddc865e0759057a6bf7d2a2c8b527e2", + "0x40f096513588cc19c08a69e4a48ab6a43739df4450b86d3ec2fb3c6a743b5485", + "0x09fcf7d49290785c9ea2d54c3d63f84f6ea0a2e9acfcdbb0cc3a281ce438250e", + "0x2000a519bf3da827f580982d449b5c70fcc0d4fa232addabe47bb8b1c471e62e", + "0xf4f80008518e200c40b043f34fb87a6f61b82f8c737bd784292911af3740245e", + "0x939eaab59f3d2ad49e50a0220080882319db7633274a978ced03489870945a65", + "0xadcad043d8c753fb10689280b7670f313253f5d719039e250a673d94441ee17c", + "0x58b7b75f090166b8954c61057074707d7e38d55ce39d9b2251bbc3d72be458f8", + "0xf61031890c94c5f87229ec608f2a9aa0a3f455ba8094b78395ae312cbfa04087", + "0x356a55def50139f94945e4ea432e7a9defa5db7975462ebb6ca99601c614ea1d", + "0x65963bb743d5db080005c4db59e29c4a4e86f92ab1dd7a59f69ea7eaf8e9aa79", + ], + lamport_1: vec![ + 
"0x9c0bfb14de8d2779f88fc8d5b016f8668be9e231e745640096d35dd5f53b0ae2", + "0x756586b0f3227ab0df6f4b7362786916bd89f353d0739fffa534368d8d793816", + "0x710108dddc39e579dcf0819f9ad107b3c56d1713530dd94325db1d853a675a37", + "0x8862b5f428ce5da50c89afb50aa779bb2c4dfe60e6f6a070b3a0208a4a970fe5", + "0x54a9cd342fa3a4bf685c01d1ce84f3068b0d5b6a58ee22dda8fbac4908bb9560", + "0x0fa3800efeaddd28247e114a1cf0f86b9014ccae9c3ee5f8488168b1103c1b44", + "0xbb393428b7ebfe2eda218730f93925d2e80c020d41a29f4746dcbb9138f7233a", + "0x7b42710942ef38ef2ff8fe44848335f26189c88c22a49fda84a51512ac68cd5d", + "0x90e99786a3e8b04db95ccd44d01e75558d75f3ddd12a1e9a2c2ce76258bf4813", + "0x3f6f71e40251728aa760763d25deeae54dc3a9b53807c737deee219120a2230a", + "0xe56081a7933c6eaf4ef2c5a04e21ab8a3897785dd83a34719d1b62d82cfd00c2", + "0x76cc54fa15f53e326575a9a2ac0b8ed2869403b6b6488ce4f3934f17db0f6bee", + "0x1cd9cd1d882ea3830e95162b5de4beb5ddff34fdbf7aec64e83b82a6d11b417c", + "0xb8ca8ae36d717c448aa27405037e44d9ee28bb8c6cc538a5d22e4535c8befd84", + "0x5c4492108c25f873a23d5fd7957b3229edc22858e8894febe7428c0831601982", + "0x907bcd75e7465e9791dc34e684742a2c0dc7007736313a95070a7e6b961c9c46", + "0xe7134b1511559e6b2440672073fa303ec3915398e75086149eb004f55e893214", + "0x2ddc2415e4753bfc383d48733e8b2a3f082883595edc5515514ebb872119af09", + "0xf2ad0f76b08ffa1eee62228ba76f4982fab4fbede5d4752c282c3541900bcd5b", + "0x0a84a6b15abd1cbc2da7092bf7bac418b8002b7000236dfba7c8335f27e0f1d4", + "0x97404e02b9ff5478c928e1e211850c08cc553ebac5d4754d13efd92588b1f20d", + "0xfa6ca3bcff1f45b557cdec34cb465ab06ade397e9d9470a658901e1f0f124659", + "0x5bd972d55f5472e5b08988ee4bccc7240a8019a5ba338405528cc8a38b29bc21", + "0x52952e4f96c803bb76749800891e3bfe55f7372facd5b5a587a39ac10b161bcc", + "0xf96731ae09abcad016fd81dc4218bbb5b2cb5fe2e177a715113f381814007314", + "0xe7d79e07cf9f2b52623491519a21a0a3d045401a5e7e10dd8873a85076616326", + "0xe4892f3777a4614ee6770b22098eaa0a3f32c5c44b54ecedacd69789d676dffe", + 
"0x20c932574779e2cc57780933d1dc6ce51a5ef920ce5bf681f7647ac751106367", + "0x057252c573908e227cc07797117701623a4835f4b047dcaa9678105299e48e70", + "0x20bad780930fa2a036fe1dea4ccbf46ac5b3c489818cdb0f97ae49d6e2f11fbf", + "0xc0d7dd26ffecdb098585a1694e45a54029bb1e31c7c5209289058efebb4cc91b", + "0x9a8744beb1935c0abe4b11812fc02748ef7c8cb650db3024dde3c5463e9d8714", + "0x8ce6eea4585bbeb657b326daa4f01f6aef34954338b3ca42074aedd1110ba495", + "0x1c85b43f5488b370721290d2faea19d9918d094c99963d6863acdfeeca564363", + "0xe88a244347e448349e32d0525b40b18533ea227a9d3e9b78a9ff14ce0a586061", + "0x352ca61efc5b8ff9ee78e738e749142dd1606154801a1449bbb278fa6bcc3dbe", + "0xa066926f9209220b24ea586fb20eb8199a05a247c82d7af60b380f6237429be7", + "0x3052337ccc990bfbae26d2f9fe5d7a4eb8edfb83a03203dca406fba9f4509b6e", + "0x343ce573a93c272688a068d758df53c0161aa7f9b55dec8beced363a38b33069", + "0x0f16b5593f133b58d706fe1793113a10750e8111eadee65301df7a1e84f782d3", + "0x808ae8539357e85b648020f1e9d255bc4114bee731a6220d7c5bcb5b85224e03", + "0x3b2bd97e31909251752ac57eda6015bb05b85f2838d475095cfd146677430625", + "0xe4f857c93b2d8b250050c7381a6c7c660bd29066195806c8ef11a2e6a6640236", + "0x23d91589b5070f443ddcefa0838c596518d54928119251ecf3ec0946a8128f52", + "0xb72736dfad52503c7f5f0c59827fb6ef4ef75909ff9526268abc0f296ee37296", + "0x80a8c66436d86b8afe87dde7e53a53ef87e057a5d4995963e76d159286de61b6", + "0xbec92c09ee5e0c84d5a8ba6ca329683ff550ace34631ea607a3a21f99cd36d67", + "0x83c97c9807b9ba6d9d914ae49dabdb4c55e12e35013f9b179e6bc92d5d62222b", + "0x8d9c79f6af3920672dc4cf97a297c186e75083d099aeb5c1051207bad0c98964", + "0x2aaa5944a2bd852b0b1be3166e88f357db097b001c1a71ba92040b473b30a607", + "0x46693d27ec4b764fbb516017c037c441f4558aebfe972cdcd03da67c98404e19", + "0x903b25d9e12208438f203c9ae2615b87f41633d5ffda9cf3f124c1c3922ba08f", + "0x3ec23dc8bc1b49f5c7160d78008f3f235252086a0a0fa3a7a5a3a53ad29ec410", + "0xa1fe74ceaf3cccd992001583a0783d7d7b7a245ea374f369133585b576b9c6d8", + 
"0xb2d6b0fe4932a2e06b99531232398f39a45b0f64c3d4ebeaaebc8f8e50a80607", + "0xe19893353f9214eebf08e5d83c6d44c24bffe0eceee4dc2e840d42eab0642536", + "0x5b798e4bc099fa2e2b4b5b90335c51befc9bbab31b4dd02451b0abd09c06ee79", + "0xbab2cdec1553a408cac8e61d9e6e19fb8ccfb48efe6d02bd49467a26eeeca920", + "0x1c1a544c28c38e5c423fe701506693511b3bc5f2af9771b9b2243cd8d41bebfc", + "0x704d6549d99be8cdefeec9a58957f75a2be4af7bc3dc4655fa606e7f3e03b030", + "0x051330f43fe39b08ed7d82d68c49b36a8bfa31357b546bfb32068712df89d190", + "0xe69174c7b03896461cab2dfaab33d549e3aac15e6b0f6f6f466fb31dae709b9b", + "0xe5f668603e0ddbbcde585ac41c54c3c4a681fffb7a5deb205344de294758e6ac", + "0xca70d5e4c3a81c1f21f246a3f52c41eaef9a683f38eb7c512eac8b385f46cbcd", + "0x3173a6b882b21cd147f0fc60ef8f24bbc42104caed4f9b154f2d2eafc3a56907", + "0xc71469c192bf5cc36242f6365727f57a19f924618b8a908ef885d8f459833cc3", + "0x59c596fc388afd8508bd0f5a1e767f3dda9ed30f6646d15bc59f0b07c4de646f", + "0xb200faf29368581f551bd351d357b6fa8cbf90bdc73b37335e51cad36b4cba83", + "0x275cede69b67a9ee0fff1a762345261cb20fa8191470159cc65c7885cfb8313c", + "0x0ce4ef84916efbe1ba9a0589bed098793b1ea529758ea089fd79151cc9dc7494", + "0x0f08483bb720e766d60a3cbd902ce7c9d835d3f7fdf6dbe1f37bcf2f0d4764a2", + "0xb30a73e5db2464e6da47d10667c82926fa91fceb337d89a52db5169008bc6726", + "0x6b9c50fed1cc404bf2dd6fffbfd18e30a4caa1500bfeb080aa93f78d10331aaf", + "0xf17c84286df03ce175966f560600dd562e0f59f18f1d1276b4d8aca545d57856", + "0x11455f2ef96a6b2be69854431ee219806008eb80ea38c81e45b2e58b3f975a20", + "0x9a61e03e2157a5c403dfcde690f7b7d704dd56ea1716cf14cf7111075a8d6491", + "0x30312c910ce6b39e00dbaa669f0fb7823a51f20e83eaeb5afa63fb57668cc2f4", + "0x17c18d261d94fba82886853a4f262b9c8b915ed3263b0052ece5826fd7e7d906", + "0x2d8f6ea0f5b9d0e4bc1478161f5ed2ad3d8495938b414dcaec9548adbe572671", + "0x19954625f13d9bab758074bf6dee47484260d29ee118347c1701aaa74abd9848", + "0x842ef2ad456e6f53d75e91e8744b96398df80350cf7af90b145fea51fbbcf067", + 
"0x34a8b0a76ac20308aa5175710fb3e75c275b1ff25dba17c04e3a3e3c48ca222c", + "0x58efcbe75f32577afe5e9ff827624368b1559c32fcca0cf4fd704af8ce019c63", + "0x411b4d242ef8f14d92bd8b0b01cb4fa3ca6f29c6f9073cfdd3ce614fa717463b", + "0xf76dbda66ede5e789314a88cff87ecb4bd9ca418c75417d4d920e0d21a523257", + "0xd801821a0f87b4520c1b003fe4936b6852c410ee00b46fb0f81621c9ac6bf6b4", + "0x97ad11d6a29c8cf3c548c094c92f077014de3629d1e9053a25dbfaf7eb55f72d", + "0xa87012090cd19886d49521d564ab2ad0f18fd489599050c42213bb960c9ee8ff", + "0x8868d8a26e758d50913f2bf228da0444a206e52853bb42dd8f90f09abe9c859a", + "0xc257fb0cc9970e02830571bf062a14540556abad2a1a158f17a18f14b8bcbe95", + "0xfe611ce27238541b14dc174b652dd06719dfbcda846a027f9d1a9e8e9df2c065", + "0xc9b25ea410f420cc2d4fc6057801d180c6cab959bce56bf6120f555966e6de6d", + "0x95437f0524ec3c04d4132c83be7f1a603e6f4743a85ede25aa97a1a4e3f3f8fc", + "0x82a12910104065f35e983699c4b9187aed0ab0ec6146f91728901efecc7e2e20", + "0x6622dd11e09252004fb5aaa39e283333c0686065f228c48a5b55ee2060dbd139", + "0x89a2879f25733dab254e4fa6fddb4f04b8ddf018bf9ad5c162aea5c858e6faaa", + "0x8a71b62075a6011fd9b65d956108fa79cc9ebb8f194d64d3105a164e01cf43a6", + "0x103f4fe9ce211b6452181371f0dc4a30a557064b684645a4495136f4ebd0936a", + "0x97914adc5d7ce80147c2f44a6b29d0b495d38dedd8cc299064abcc62ed1ddabc", + "0x825c481da6c836a8696d7fda4b0563d204a9e7d9e4c47b46ded26db3e2d7d734", + "0xf8c0637ba4c0a383229f1d730db733bc11d6a4e33214216c23f69ec965dcaaad", + "0xaed3bdaf0cb12d37764d243ee0e8acdefc399be2cabbf1e51dc43454efd79cbd", + "0xe8427f56cc5cec8554e2f5f586b57adccbea97d5fc3ef7b8bbe97c2097cf848c", + "0xba4ad0abd5c14d526357fd0b6f8676ef6126aeb4a6d80cabe1f1281b9d28246c", + "0x4cff20b72e2ab5af3fafbf9222146949527c25f485ec032f22d94567ff91b22f", + "0x0d32925d89dd8fed989912afcbe830a4b5f8f7ae1a3e08ff1d3a575a77071d99", + "0xe51a1cbeae0be5d2fdbc7941aea904d3eade273f7477f60d5dd6a12807246030", + "0xfb8615046c969ef0fa5e6dc9628c8a9880e86a5dc2f6fc87aff216ea83fcf161", + 
"0x64dd705e105c88861470d112c64ca3d038f67660a02d3050ea36c34a9ebf47f9", + "0xb6ad148095c97528180f60fa7e8609bf5ce92bd562682092d79228c2e6f0750c", + "0x5bae0cd81f3bd0384ca3143a72068e6010b946462a73299e746ca639c026781c", + "0xc39a0fc7764fcfc0402b12fb0bbe78fe3633cbfb33c7f849279585a878a26d7c", + "0x2b752fda1c0c53d685cc91144f78d371db6b766725872b62cc99e1234cca8c1a", + "0x40ee6b9635d87c95a528757729212a261843ecb06d975de91352d43ca3c7f196", + "0x75e2005d3726cf8a4bb97ea5287849a361e3f8fdfadc3c1372feed1208c89f6b", + "0x0976f8ab556153964b58158678a5297da4d6ad92e284da46052a791ee667aee4", + "0xdbeef07841e41e0672771fb550a5b9233ae8e9256e23fa0d34d5ae5efe067ec8", + "0xa890f412ab6061c0c5ee661e80d4edc5c36b22fb79ac172ddd5ff26a7dbe9751", + "0xb666ae07f9276f6d0a33f9efeb3c5cfcba314fbc06e947563db92a40d7a341e8", + "0x83a082cf97ee78fbd7f31a01ae72e40c2e980a6dab756161544c27da86043528", + "0xfa726a919c6f8840c456dc77b0fec5adbed729e0efbb9317b75f77ed479c0f44", + "0xa8606800c54faeab2cbc9d85ff556c49dd7e1a0476027e0f7ce2c1dc2ba7ccbf", + "0x2796277836ab4c17a584c9f6c7778d10912cb19e541fb75453796841e1f6cd1c", + "0xf648b8b3c7be06f1f8d9cda13fd6d60f913e5048a8e0b283b110ca427eeb715f", + "0xa21d00b8fdcd77295d4064e00fbc30bed579d8255e9cf3a9016911d832390717", + "0xe741afcd98cbb3bb140737ed77bb968ac60d5c00022d722f9f04f56e97235dc9", + "0xbeecc9638fac39708ec16910e5b02c91f83f6321f6eb658cf8a96353cfb49806", + "0x912eee6cabeb0fed8d6e6ca0ba61977fd8e09ea0780ff8fbec995e2a85e08b52", + "0xc665bc0bb121a1229bc56ecc07a7e234fd24c523ea14700aa09e569b5f53ad33", + "0x39501621c2bdff2f62ab8d8e3fe47fe1701a98c665697c5b750ee1892f11846e", + "0x03d32e16c3a6c913daefb139f131e1e95a742b7be8e20ee39b785b4772a50e44", + "0x4f504eb46a82d440f1c952a06f143994bc66eb9e3ed865080cd9dfc6d652b69c", + "0xad753dc8710a46a70e19189d8fc7f4c773e4d9ccc7a70c354b574fe377328741", + "0xf7f5464a2d723b81502adb9133a0a4f0589b4134ca595a82e660987c6b011610", + "0x216b60b1c3e3bb4213ab5d43e04619d13e1ecedbdd65a1752bda326223e3ca3e", + 
"0x763664aa96d27b6e2ac7974e3ca9c9d2a702911bc5d550d246631965cf2bd4a2", + "0x292b5c8c8431b040c04d631f313d4e6b67b5fd3d4b8ac9f2edb09d13ec61f088", + "0x80db43c2b9e56eb540592f15f5900222faf3f75ce62e78189b5aa98c54568a5e", + "0x1b5fdf8969bcd4d65e86a2cefb3a673e18d587843f4f50db4e3ee77a0ba2ef1c", + "0x11e237953fff3e95e6572da50a92768467ffdfd0640d3384aa1c486357e7c24a", + "0x1fabd4faa8dba44808cc87d0bc389654a98496745578f3d17d134adc7f7b10f3", + "0x5eca4aa96f20a56197772ae6b600762154ca9d2702cab12664ea47cbff1a440c", + "0x0b4234f5bb02abcf3b5ce6c44ea85f55ec7db98fa5a7b90abef6dd0df034743c", + "0x316761e295bf350313c4c92efea591b522f1df4211ce94b22e601f30aefa51ef", + "0xe93a55ddb4d7dfe02598e8f909ff34b3de40a1c0ac8c7fba48cb604ea60631fb", + "0xe6e6c877b996857637f8a71d0cd9a6d47fdeb03752c8965766f010073332b087", + "0xa4f95c8874e611eddd2c4502e4e1196f0f1be90bfc37db35f8588e7d81d34aeb", + "0x9351710a5633714bb8b2d226e15ba4caa6f50f56c5508e5fa1239d5cc6a7e1aa", + "0x8d0aef52ec7266f37adb572913a6213b8448caaf0384008373dec525ae6cdff1", + "0x718e24c3970c85bcb14d2763201812c43abac0a7f16fc5787a7a7b2f37288586", + "0x3600ce44cebc3ee46b39734532128eaf715c0f3596b554f8478b961b0d6e389a", + "0x50dd1db7b0a5f6bd2d16252f43254d0f5d009e59f61ebc817c4bbf388519a46b", + "0x67861ed00f5fef446e1f4e671950ac2ddae1f3b564f1a6fe945e91678724ef03", + "0x0e332c26e169648bc20b4f430fbf8c26c6edf1a235f978d09d4a74c7b8754aad", + "0x6c9901015adf56e564dfb51d41a82bde43fb67273b6911c9ef7fa817555c9557", + "0x53c83391e5e0a024f68d5ade39b7a769f10664e12e4942c236398dd5dbce47a1", + "0x78619564f0b2399a9fcb229d938bf1e298d62b03b7a37fe6486034185d7f7d27", + "0x4625f15381a8723452ec80f3dd0293c213ae35de737c508f42427e1735398c3a", + "0x69542425ddb39d3d3981e76b41173eb1a09500f11164658a3536bf3e292f8b6a", + "0x82ac4f5bb40aece7d6706f1bdf4dfba5c835c09afba6446ef408d8ec6c09300f", + "0x740f9180671091b4c5b3ca59b9515bd0fc751f48e488a9f7f4b6848602490e21", + "0x9a04b08b4115986d8848e80960ad67490923154617cb82b3d88656ec1176c24c", + 
"0xf9ffe528eccffad519819d9eef70cef317af33899bcaee16f1e720caf9a98744", + "0x46da5e1a14b582b237f75556a0fd108c4ea0d55c0edd8f5d06c59a42e57410df", + "0x098f3429c8ccda60c3b5b9755e5632dd6a3f5297ee819bec8de2d8d37893968a", + "0x1a5b91af6025c11911ac072a98b8a44ed81f1f3c76ae752bd28004915db6f554", + "0x8bed50c7cae549ed4f8e05e02aa09b2a614c0af8eec719e4c6f7aee975ec3ec7", + "0xd86130f624b5dcc116f2dfbb5219b1afde4b7780780decd0b42694e15c1f8d8b", + "0x4167aa9bc0075f624d25d40eb29139dd2c452ebf17739fab859e14ac6765337a", + "0xa258ce5db20e91fb2ea30d607ac2f588bdc1924b21bbe39dc881e19889a7f5c6", + "0xe5ef8b5ab3cc8894452d16dc875b69a55fd925808ac7cafef1cd19485d0bb50a", + "0x120df2b3975d85b6dfca56bb98a82025ade5ac1d33e4319d2e0105b8de9ebf58", + "0xc964291dd2e0807a468396ebba3d59cfe385d949f6d6215976fc9a0a11de209a", + "0xf23f14cb709074b79abe166f159bc52b50de687464df6a5ebf112aa953c95ad5", + "0x622c092c9bd7e30f880043762e26d8e9c73ab7c0d0806f3c5e472a4152b35a93", + "0x8a5f090662731e7422bf651187fb89812419ab6808f2c62da213d6944fccfe9f", + "0xfbea3c0d92e061fd2399606f42647d65cc54191fa46d57b325103a75f5c22ba6", + "0x2babfbcc08d69b52c3747ddc8dcad4ea5511edabf24496f3ff96a1194d6f680e", + "0x4d3d019c28c779496b616d85aee201a3d79d9eecf35f728d00bcb12245ace703", + "0xe76fcee1f08325110436f8d4a95476251326b4827399f9b2ef7e12b7fb9c4ba1", + "0x4884d9c0bb4a9454ea37926591fc3eed2a28356e0506106a18f093035638da93", + "0x74c3f303d93d4cc4f0c1eb1b4378d34139220eb836628b82b649d1deb519b1d3", + "0xacb806670b278d3f0c84ba9c7a68c7df3b89e3451731a55d7351468c7c864c1c", + "0x8660fb8cd97e585ea7a41bccb22dd46e07eee8bbf34d90f0f0ca854b93b1ebee", + "0x2fc9c89cdca71a1c0224d469d0c364c96bbd99c1067a7ebe8ef412c645357a76", + "0x8ec6d5ab6ad7135d66091b8bf269be44c20af1d828694cd8650b5479156fd700", + "0x50ab4776e8cabe3d864fb7a1637de83f8fbb45d6e49645555ffe9526b27ebd66", + "0xbf39f5e17082983da4f409f91c7d9059acd02ccbefa69694aca475bb8d40b224", + "0x3135b3b981c850cc3fe9754ec6af117459d355ad6b0915beb61e84ea735c31bf", + 
"0xa7971dab52ce4bf45813223b0695f8e87f64b614c9c5499faac6f842e5c41be9", + "0x9e480f5617323ab104b4087ac4ef849a5da03427712fb302ac085507c77d8f37", + "0x57a6d474654d5e8d408159be39ad0e7026e6a4c6a6543e23a63d30610dc8dfc1", + "0x09eb3e01a5915a4e26d90b4c58bf0cf1e560fdc8ba53faed9d946ad3e9bc78fa", + "0x29c6d25da80a772310226b1b89d845c7916e4a4bc94d75aa330ec3eaa14b1e28", + "0x1a1ccfee11edeb989ca02e3cb89f062612a22a69ec816a625835d79370173987", + "0x1cb63dc541cf7f71c1c4e8cabd2619c3503c0ea1362dec75eccdf1e9efdbfcfc", + "0xac9dff32a69e75b396a2c250e206b36c34c63b955c9e5732e65eaf7ccca03c62", + "0x3e1b4f0c3ebd3d38cec389720147746774fc01ff6bdd065f0baf2906b16766a8", + "0x5cc8bed25574463026205e90aad828521f8e3d440970d7e810d1b46849681db5", + "0x255185d264509bd3a768bb0d50b568e66eb1fec96d573e33aaacc716d7c8fb93", + "0xe81b86ba631973918a859ff5995d7840b12511184c2865401f2693a71b9fa07e", + "0x61e67e42616598da8d36e865b282127c761380d3a56d26b8d35fbbc7641433c5", + "0x60c62ffef83fe603a34ca20b549522394e650dad5510ae68b6e074f0cd209a56", + "0x78577f2caf4a54f6065593535d76216f5f4075af7e7a98b79571d33b1822920c", + "0xfd4cb354f2869c8650200de0fe06f3d39e4dbebf19b0c1c2677da916ea84f44d", + "0x453769cef6ff9ba2d5c917982a1ad3e2f7e947d9ea228857556af0005665e0b0", + "0xe567f93f8f88bf1a6b33214f17f5d60c5dbbb531b4ab21b8c0b799b6416891e0", + "0x7e65a39a17f902a30ceb2469fe21cba8d4e0da9740fcefd5c647c81ff1ae95fa", + "0x03e4a7eea0cd6fc02b987138ef88e8795b5f839636ca07f6665bbae9e5878931", + "0xc3558e2b437cf0347cabc63c95fa2710d3f43c65d380feb998511903f9f4dcf0", + "0xe3a615f80882fb5dfbd08c1d7a8b0a4d3b651d5e8221f99b879cb01d97037a9c", + "0xb56db4a5fea85cbffaee41f05304689ea321c40d4c108b1146fa69118431d9b2", + "0xab28e1f077f18117945910c235bc9c6f9b6d2b45e9ef03009053006c637e3e26", + "0xefcabc1d5659fd6e48430dbfcc9fb4e08e8a9b895f7bf9b3d6c7661bfc44ada2", + "0xc7547496f212873e7c3631dafaca62a6e95ac39272acf25a7394bac6ea1ae357", + "0xc482013cb01bd69e0ea9f447b611b06623352e321469f4adc739e3ee189298eb", + 
"0x5942f42e91e391bb44bb2c4d40da1906164dbb6d1c184f00fa62899baa0dba2c", + "0xb4bcb46c80ad4cd603aff2c1baf8f2c896a628a46cc5786f0e58dae846694677", + "0xd0a7305b995fa8c317c330118fee4bfef9f65f70b54558c0988945b08e90ff08", + "0x687f801b7f32fdfa7d50274cc7b126efedbdae8de154d36395d33967216f3086", + "0xeb19ec10ac6c15ffa619fa46792971ee22a9328fa53bd69a10ed6e9617dd1bbf", + "0xa2bb3f0367f62abdb3a9fa6da34b20697cf214a4ff14fd42826da140ee025213", + "0x070a76511f32c882374400af59b22d88974a06fbc10d786dd07ca7527ebd8b90", + "0x8f195689537b446e946b376ec1e9eb5af5b4542ab47be550a5700fa5d81440d5", + "0x10cc09778699fc8ac109e7e6773f83391eeba2a6db5226fbe953dd8d99126ca5", + "0x8cc839cb7dc84fd3b8c0c7ca637e86a2f72a8715cc16c7afb597d12da717530b", + "0xa32504e6cc6fd0ee441440f213f082fcf76f72d36b5e2a0f3b6bdd50cdd825a2", + "0x8f45151db8878e51eec12c450b69fa92176af21a4543bb78c0d4c27286e74469", + "0x23f5c465bd35bcd4353216dc9505df68324a27990df9825a242e1288e40a13bb", + "0x35f409ce748af33c20a6ae693b8a48ba4623de9686f9834e22be4410e637d24f", + "0xb962e5845c1db624532562597a99e2acc5e434b97d8db0725bdeddd71a98e737", + "0x0f8364f99f43dd52b4cfa9e426c48f7b6ab18dc40a896e96a09eceebb3363afe", + "0xa842746868da7644fccdbb07ae5e08c71a6287ab307c4f9717eadb414c9c99f4", + "0xa59064c6b7fe7d2407792d99ed1218d2dc2f240185fbd8f767997438241b92e9", + "0xb6ea0d58e8d48e05b9ff4d75b2ebe0bd9752c0e2691882f754be66cdec7628d3", + "0xf16b78c9d14c52b2b5156690b6ce37a5e09661f49674ad22604c7d3755e564d1", + "0xbfa8ef74e8a37cd64b8b4a4260c4fc162140603f9c2494b9cf4c1e13de522ed9", + "0xf4b89f1776ebf30640dc5ec99e43de22136b6ef936a85193ef940931108e408a", + "0xefb9a4555d495a584dbcc2a50938f6b9827eb014ffae2d2d0aae356a57894de8", + "0x0627a466d42a26aca72cf531d4722e0e5fc5d491f4527786be4e1b641e693ac2", + "0x7d10d21542de3d8f074dbfd1a6e11b3df32c36272891aae54053029d39ebae10", + "0x0f21118ee9763f46cc175a21de876da233b2b3b62c6f06fa2df73f6deccf37f3", + "0x143213b96f8519c15164742e2350cc66e814c9570634e871a8c1ddae4d31b6b5", + 
"0x8d2877120abae3854e00ae8cf5c8c95b3ede10590ab79ce2be7127239507e18d", + "0xaccd0005d59472ac04192c059ed9c10aea42c4dabec9e581f6cb10b261746573", + "0x67bc8dd5422f39e741b9995e6e60686e75d6620aa0d745b84191f5dba9b5bb18", + "0x11b8e95f6a654d4373cefbbac29a90fdd8ae098043d1969b9fa7885318376b34", + "0x431a0b8a6f08760c942eeff5791e7088fd210f877825ce4dcabe365e03e4a65c", + "0x704007f11bae513f428c9b0d23593fd2809d0dbc4c331009856135dafec23ce4", + "0xc06dee39a33a05e30c522061c1d9272381bde3f9e42fa9bd7d5a5c8ef11ec6ec", + "0x66b4157baaae85db0948ad72882287a80b286df2c40080b8da4d5d3db0a61bd2", + "0xef1983b1906239b490baaaa8e4527f78a57a0a767d731f062dd09efb59ae8e3d", + "0xf26d0d5c520cce6688ca5d51dee285af26f150794f2ea9f1d73f6df213d78338", + "0x8b28838382e6892f59c42a7709d6d38396495d3af5a8d5b0a60f172a6a8940bd", + "0x261a605fa5f2a9bdc7cffac530edcf976e7ea7af4e443b625fe01ed39dad44b6", + ], + compressed_lamport_pk: + "0xdd635d27d1d52b9a49df9e5c0c622360a4dd17cba7db4e89bce3cb048fb721a5", + child_sk: + "20397789859736650942317412262472558107875392172444076792671091975210932703118", } } } diff --git a/crypto/eth2_key_derivation/tests/eip2333_vectors.rs b/crypto/eth2_key_derivation/tests/eip2333_vectors.rs index 6995bd087b4..e4406ab1f7e 100644 --- a/crypto/eth2_key_derivation/tests/eip2333_vectors.rs +++ b/crypto/eth2_key_derivation/tests/eip2333_vectors.rs @@ -65,9 +65,9 @@ fn assert_vector_passes(raw: RawTestVector) { fn eip2333_test_case_0() { assert_vector_passes(RawTestVector { seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: "12513733877922233913083619867448865075222526338446857121953625441395088009793", + master_sk: "6083874454709270928345386274498605044986640685124978867557563392430687146096", child_index: 0, - child_sk: "7419543105316279183937430842449358701327973165530407166294956473095303972104" + child_sk: "20397789859736650942317412262472558107875392172444076792671091975210932703118", }) } 
@@ -75,9 +75,9 @@ fn eip2333_test_case_0() { fn eip2333_test_case_1() { assert_vector_passes(RawTestVector { seed: "0x3141592653589793238462643383279502884197169399375105820974944592", - master_sk: "46029459550803682895343812821003080589696405386150182061394330539196052371668", + master_sk: "29757020647961307431480504535336562678282505419141012933316116377660817309383", child_index: 3141592653, - child_sk: "43469287647733616183478983885105537266268532274998688773496918571876759327260", + child_sk: "25457201688850691947727629385191704516744796114925897962676248250929345014287", }) } @@ -85,9 +85,9 @@ fn eip2333_test_case_1() { fn eip2333_test_case_2() { assert_vector_passes(RawTestVector { seed: "0x0099FF991111002299DD7744EE3355BBDD8844115566CC55663355668888CC00", - master_sk: "45379166311535261329029945990467475187325618028073620882733843918126031931161", + master_sk: "27580842291869792442942448775674722299803720648445448686099262467207037398656", child_index: 4294967295, - child_sk: "46475244006136701976831062271444482037125148379128114617927607151318277762946", + child_sk: "29358610794459428860402234341874281240803786294062035874021252734817515685787", }) } @@ -95,8 +95,8 @@ fn eip2333_test_case_2() { fn eip2333_test_case_3() { assert_vector_passes(RawTestVector { seed: "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - master_sk: "31740500954810567003972734830331791822878290325762596213711963944729383643688", + master_sk: "19022158461524446591288038168518313374041767046816487870552872741050760015818", child_index: 42, - child_sk: "51041472511529980987749393477251359993058329222191894694692317000136653813011", + child_sk: "31372231650479070279774297061823572166496564838472787488249775572789064611981", }) } From c8fb9d912fdd7b95c38dc16f1b0cd536bbfe25ad Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 24 Sep 2020 04:06:02 +0000 Subject: [PATCH 02/33] Fix validator lockfiles (#1586) ## Issue Addressed - Resolves #1313 ## Proposed Changes 
Changes the way we start the validator client and beacon node to ensure that we cleanly drop the validator keystores (which therefore ensures we cleanup their lockfiles). Previously we were holding the validator keystores in a tokio task that was being forcefully killed (i.e., without `Drop`). Now, we hold them in a task that can gracefully handle a shutdown. Also, switches the `--strict-lockfiles` flag to `--delete-lockfiles`. This means two things: 1. We are now strict on lockfiles by default (before we weren't). 1. There's a simple way for people to delete the lockfiles if they experience a crash. ## Additional Info I've only given the option to ignore *and* delete lockfiles, not just ignore them. I can't see a strong need for ignore-only but could easily add it, if the need arises. I've flagged this as `api-breaking` since users that have lockfiles lingering around will be required to supply `--delete-lockfiles` next time they run. --- beacon_node/src/lib.rs | 5 +- lighthouse/src/main.rs | 98 +++++++------- validator_client/src/cli.rs | 12 +- validator_client/src/config.rs | 8 +- .../src/initialized_validators.rs | 51 ++++--- validator_client/src/lib.rs | 126 ++++++++++-------- 6 files changed, 169 insertions(+), 131 deletions(-) diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 19931916013..a09f8c6cd32 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -7,7 +7,7 @@ mod config; pub use beacon_chain; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_data_dir, get_eth2_testnet_config, set_network_config}; +pub use config::{get_config, get_data_dir, get_eth2_testnet_config, set_network_config}; pub use eth2_config::Eth2Config; use beacon_chain::events::TeeEventHandler; @@ -17,7 +17,6 @@ use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, }; use clap::ArgMatches; -use config::get_config; use environment::RuntimeContext; 
use slog::{info, warn}; use std::ops::{Deref, DerefMut}; @@ -54,7 +53,7 @@ impl ProductionBeaconNode { /// configurations hosted remotely. pub async fn new_from_cli( context: RuntimeContext, - matches: &ArgMatches<'_>, + matches: ArgMatches<'static>, ) -> Result { let client_config = get_config::( &matches, diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 2df5c35396c..c174992e0e6 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -255,61 +255,63 @@ fn run( "name" => testnet_name ); - let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("beacon_node") { - let runtime_context = environment.core_context(); - - let beacon = environment - .runtime() - .block_on(ProductionBeaconNode::new_from_cli( - runtime_context, - sub_matches, - )) - .map_err(|e| format!("Failed to start beacon node: {}", e))?; - - Some(beacon) - } else { - None - }; - - let validator_client = if let Some(sub_matches) = matches.subcommand_matches("validator_client") - { - let runtime_context = environment.core_context(); - - let mut validator = environment - .runtime() - .block_on(ProductionValidatorClient::new_from_cli( - runtime_context, - sub_matches, - )) - .map_err(|e| format!("Failed to init validator client: {}", e))?; - - environment - .core_context() - .executor - .runtime_handle() - .enter(|| { - validator - .start_service() - .map_err(|e| format!("Failed to start validator client service: {}", e)) - })?; - - Some(validator) - } else { - None + match matches.subcommand() { + ("beacon_node", Some(matches)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = beacon_node::get_config::( + matches, + &context.eth2_config.spec_constants, + &context.eth2_config().spec, + context.log().clone(), + )?; + environment.runtime().spawn(async move { + if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { + crit!(log, "Failed to start beacon 
node"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start beacon node"); + } + }) + } + ("validator_client", Some(matches)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = validator_client::Config::from_cli(&matches) + .map_err(|e| format!("Unable to initialize validator config: {}", e))?; + environment.runtime().spawn(async move { + let run = async { + ProductionValidatorClient::new(context, config) + .await? + .start_service()?; + + Ok::<(), String>(()) + }; + if let Err(e) = run.await { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send("Failed to start validator client"); + } + }) + } + _ => { + crit!(log, "No subcommand supplied. See --help ."); + return Err("No subcommand supplied.".into()); + } }; - if beacon_node.is_none() && validator_client.is_none() { - crit!(log, "No subcommand supplied. See --help ."); - return Err("No subcommand supplied.".into()); - } - // Block this thread until we get a ctrl-c or a task sends a shutdown signal. environment.block_until_shutdown_requested()?; info!(log, "Shutting down.."); environment.fire_signal(); - drop(beacon_node); - drop(validator_client); // Shutdown the environment once all tasks have completed. environment.shutdown_on_idle(); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ed320c24cde..7ac483439ce 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -37,11 +37,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { nodes using the same key. 
Automatically enabled unless `--strict` is specified", )) .arg( - Arg::with_name("strict-lockfiles") - .long("strict-lockfiles") + Arg::with_name("delete-lockfiles") + .long("delete-lockfiles") .help( - "If present, do not load validators that are guarded by a lockfile. Note: for \ - Eth2 mainnet, this flag will likely be removed and its behaviour will become default." + "If present, ignore and delete any keystore lockfiles encountered during start up. \ + This is useful if the validator client did not exit gracefully on the last run. \ + WARNING: lockfiles help prevent users from accidentally running the same validator \ + using two different validator clients, an action that likely leads to slashing. \ + Ensure you are certain that there are no other validator client instances running \ + that might also be using the same keystores." ) ) .arg( diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 482c4ed7007..4a11c5aecdc 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -24,8 +24,8 @@ pub struct Config { /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, - /// If true, refuse to unlock a keypair that is guarded by a lockfile. - pub strict_lockfiles: bool, + /// If true, delete any validator keystore lockfiles that would prevent starting. + pub delete_lockfiles: bool, /// If true, don't scan the validators dir for new keystores. pub disable_auto_discover: bool, /// Graffiti to be inserted everytime we create a block. 
@@ -46,7 +46,7 @@ impl Default for Config { secrets_dir, http_server: DEFAULT_HTTP_SERVER.to_string(), allow_unsynced_beacon_node: false, - strict_lockfiles: false, + delete_lockfiles: false, disable_auto_discover: false, graffiti: None, } @@ -77,7 +77,7 @@ impl Config { } config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); - config.strict_lockfiles = cli_args.is_present("strict-lockfiles"); + config.delete_lockfiles = cli_args.is_present("delete-lockfiles"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); if let Some(secrets_dir) = parse_optional(cli_args, "secrets-dir")? { diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 436dcb4bae3..400768f5cb4 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -54,6 +54,10 @@ pub enum Error { PasswordUnknown(PathBuf), /// There was an error reading from stdin. UnableToReadPasswordFromUser(String), + /// There was an error running a tokio async task. + TokioJoin(tokio::task::JoinError), + /// There was a filesystem error when deleting a lockfile. + UnableToDeleteLockfile(io::Error), } /// A method used by a validator to sign messages. @@ -86,7 +90,7 @@ impl InitializedValidator { /// If the validator is unable to be initialized for whatever reason. pub fn from_definition( def: ValidatorDefinition, - strict_lockfiles: bool, + delete_lockfiles: bool, log: &Logger, ) -> Result { if !def.enabled { @@ -150,16 +154,17 @@ impl InitializedValidator { })?; if voting_keystore_lockfile_path.exists() { - if strict_lockfiles { - return Err(Error::LockfileExists(voting_keystore_lockfile_path)); - } else { - // If **not** respecting lockfiles, just raise a warning if the voting - // keypair cannot be unlocked. 
+ if delete_lockfiles { warn!( log, - "Ignoring validator lockfile"; + "Deleting validator lockfile"; "file" => format!("{:?}", voting_keystore_lockfile_path) ); + + fs::remove_file(&voting_keystore_lockfile_path) + .map_err(Error::UnableToDeleteLockfile)?; + } else { + return Err(Error::LockfileExists(voting_keystore_lockfile_path)); } } else { // Create a new lockfile. @@ -279,7 +284,7 @@ pub struct InitializedValidators { impl InitializedValidators { /// Instantiates `Self`, initializing all validators in `definitions`. - pub fn from_definitions( + pub async fn from_definitions( definitions: ValidatorDefinitions, validators_dir: PathBuf, strict_lockfiles: bool, @@ -292,7 +297,7 @@ impl InitializedValidators { validators: HashMap::default(), log, }; - this.update_validators()?; + this.update_validators().await?; Ok(this) } @@ -328,7 +333,7 @@ impl InitializedValidators { /// validator will be removed from `self.validators`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. - pub fn set_validator_status( + pub async fn set_validator_status( &mut self, voting_public_key: &PublicKey, enabled: bool, @@ -342,7 +347,7 @@ impl InitializedValidators { def.enabled = enabled; } - self.update_validators()?; + self.update_validators().await?; self.definitions .save(&self.validators_dir) @@ -362,7 +367,7 @@ impl InitializedValidators { /// A validator is considered "already known" and skipped if the public key is already known. /// I.e., if there are two different definitions with the same public key then the second will /// be ignored. 
- fn update_validators(&mut self) -> Result<(), Error> { + async fn update_validators(&mut self) -> Result<(), Error> { for def in self.definitions.as_slice() { if def.enabled { match &def.signing_definition { @@ -371,11 +376,23 @@ impl InitializedValidators { continue; } - match InitializedValidator::from_definition( - def.clone(), - self.strict_lockfiles, - &self.log, - ) { + // Decoding a local keystore can take several seconds, therefore it's best + // to keep if off the core executor. This also has the fortunate effect of + // interrupting the potentially long-running task during shut down. + let inner_def = def.clone(); + let strict_lockfiles = self.strict_lockfiles; + let inner_log = self.log.clone(); + let result = tokio::task::spawn_blocking(move || { + InitializedValidator::from_definition( + inner_def, + strict_lockfiles, + &inner_log, + ) + }) + .await + .map_err(Error::TokioJoin)?; + + match result { Ok(init) => { self.validators .insert(init.voting_public_key().clone(), init); diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 220d82a66ae..6b709023faf 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -18,6 +18,7 @@ use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; use duties_service::{DutiesService, DutiesServiceBuilder}; use environment::RuntimeContext; +use eth2_config::Eth2Config; use fork_service::{ForkService, ForkServiceBuilder}; use futures::channel::mpsc; use initialized_validators::InitializedValidators; @@ -28,7 +29,7 @@ use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{delay_for, Duration}; -use types::EthSpec; +use types::{EthSpec, Hash256}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. 
@@ -90,9 +91,10 @@ impl ProductionValidatorClient { let validators = InitializedValidators::from_definitions( validator_defs, config.data_dir.clone(), - config.strict_lockfiles, + config.delete_lockfiles, log.clone(), ) + .await .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; info!( @@ -106,56 +108,11 @@ impl ProductionValidatorClient { RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) .map_err(|e| format!("Unable to init beacon node http client: {}", e))?; - // TODO: check if all logs in wait_for_node are produed while awaiting - let beacon_node = wait_for_node(beacon_node, &log).await?; - let eth2_config = beacon_node - .http - .spec() - .get_eth2_config() - .await - .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; - let genesis_time = beacon_node - .http - .beacon() - .get_genesis_time() - .await - .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {:?}", e))?; - let genesis = Duration::from_secs(genesis_time); - - // If the time now is less than (prior to) genesis, then delay until the - // genesis instant. - // - // If the validator client starts before genesis, it will get errors from - // the slot clock. - if now < genesis { - info!( - log, - "Starting node prior to genesis"; - "seconds_to_wait" => (genesis - now).as_secs() - ); - - delay_for(genesis - now).await - } else { - info!( - log, - "Genesis has already occurred"; - "seconds_ago" => (now - genesis).as_secs() - ); - } - let genesis_validators_root = beacon_node - .http - .beacon() - .get_genesis_validators_root() - .await - .map_err(|e| { - format!( - "Unable to read genesis validators root from beacon node: {:?}", - e - ) - })?; + // Perform some potentially long-running initialization tasks. + let (eth2_config, genesis_time, genesis_validators_root) = tokio::select! 
{ + tuple = init_from_beacon_node(&beacon_node, &context) => tuple?, + () = context.executor.exit() => return Err("Shutting down".to_string()) + }; // Do not permit a connection to a beacon node using different spec constants. if context.eth2_config.spec_constants != eth2_config.spec_constants { @@ -270,12 +227,71 @@ impl ProductionValidatorClient { } } +async fn init_from_beacon_node( + beacon_node: &RemoteBeaconNode, + context: &RuntimeContext, +) -> Result<(Eth2Config, u64, Hash256), String> { + // Wait for the beacon node to come online. + wait_for_node(beacon_node, context.log()).await?; + + let eth2_config = beacon_node + .http + .spec() + .get_eth2_config() + .await + .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; + let genesis_time = beacon_node + .http + .beacon() + .get_genesis_time() + .await + .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; + let genesis = Duration::from_secs(genesis_time); + + // If the time now is less than (prior to) genesis, then delay until the + // genesis instant. + // + // If the validator client starts before genesis, it will get errors from + // the slot clock. + if now < genesis { + info!( + context.log(), + "Starting node prior to genesis"; + "seconds_to_wait" => (genesis - now).as_secs() + ); + + delay_for(genesis - now).await; + } else { + info!( + context.log(), + "Genesis has already occurred"; + "seconds_ago" => (now - genesis).as_secs() + ); + } + let genesis_validators_root = beacon_node + .http + .beacon() + .get_genesis_validators_root() + .await + .map_err(|e| { + format!( + "Unable to read genesis validators root from beacon node: {:?}", + e + ) + })?; + + Ok((eth2_config, genesis_time, genesis_validators_root)) +} + /// Request the version from the node, looping back and trying again on failure. 
Exit once the node /// has been contacted. async fn wait_for_node( - beacon_node: RemoteBeaconNode, + beacon_node: &RemoteBeaconNode, log: &Logger, -) -> Result, String> { +) -> Result<(), String> { // Try to get the version string from the node, looping until success is returned. loop { let log = log.clone(); @@ -295,7 +311,7 @@ async fn wait_for_node( "version" => version, ); - return Ok(beacon_node); + return Ok(()); } Err(e) => { error!( From 137966d7b84ed2374818449624de54cb0e4382a3 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 29 Sep 2020 00:02:44 +0000 Subject: [PATCH 03/33] Directory restructure (#1532) Closes #1487 Closes #1427 Directory restructure in accordance with #1487. Also has temporary migration code to move the old directories into new structure. Also extracts all default directory names and utility functions into a `directory` crate to avoid repetitio. ~Since `validator_definition.yaml` stores absolute paths, users will have to manually change the keystore paths or delete the file to get the validators picked up by the vc.~. `validator_definition.yaml` is migrated as well from the default directories. 
Co-authored-by: realbigsean Co-authored-by: Paul Hauner --- Cargo.lock | 18 ++++- Cargo.toml | 1 + account_manager/Cargo.toml | 1 + account_manager/src/common.rs | 24 +----- account_manager/src/lib.rs | 2 +- account_manager/src/validator/create.rs | 52 +++++++------ account_manager/src/validator/deposit.rs | 18 +---- account_manager/src/validator/import.rs | 20 +---- account_manager/src/validator/list.rs | 27 ++----- account_manager/src/validator/mod.rs | 37 +++++++--- account_manager/src/validator/recover.rs | 36 +++------ account_manager/src/wallet/create.rs | 12 +-- account_manager/src/wallet/list.rs | 8 +- account_manager/src/wallet/mod.rs | 36 +++++---- beacon_node/Cargo.toml | 1 + beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/config.rs | 5 +- beacon_node/eth2_libp2p/Cargo.toml | 1 + beacon_node/eth2_libp2p/src/config.rs | 14 +++- beacon_node/src/config.rs | 21 ++++-- book/src/key-management.md | 14 ++-- book/src/validator-create.md | 10 ++- book/src/validator-management.md | 6 +- common/directory/Cargo.toml | 13 ++++ common/directory/src/lib.rs | 60 +++++++++++++++ consensus/types/Cargo.toml | 1 - .../builders/testing_beacon_state_builder.rs | 12 --- lcli/Cargo.toml | 1 + lcli/src/eth1_genesis.rs | 2 +- lcli/src/interop_genesis.rs | 2 +- lcli/src/new_testnet.rs | 2 +- lighthouse/Cargo.toml | 3 +- lighthouse/src/main.rs | 6 +- lighthouse/tests/account_manager.rs | 28 +++---- testing/node_test_rig/src/lib.rs | 8 +- validator_client/Cargo.toml | 1 + validator_client/src/cli.rs | 29 +++++++- validator_client/src/config.rs | 73 +++++++++++++------ validator_client/src/lib.rs | 10 +-- validator_client/src/validator_store.rs | 16 +++- 40 files changed, 367 insertions(+), 265 deletions(-) create mode 100644 common/directory/Cargo.toml create mode 100644 common/directory/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 6049d2e7f49..73c7d707a7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9,6 +9,7 @@ dependencies = [ "clap", "clap_utils", 
"deposit_contract", + "directory", "dirs", "environment", "eth2_keystore", @@ -377,6 +378,7 @@ dependencies = [ "clap_utils", "client", "ctrlc", + "directory", "dirs", "environment", "eth2_config", @@ -758,6 +760,7 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bus", + "directory", "dirs", "environment", "error-chain", @@ -1216,6 +1219,16 @@ dependencies = [ "generic-array 0.14.4", ] +[[package]] +name = "directory" +version = "0.1.0" +dependencies = [ + "clap", + "clap_utils", + "dirs", + "eth2_testnet_config", +] + [[package]] name = "dirs" version = "2.0.2" @@ -1522,6 +1535,7 @@ name = "eth2_libp2p" version = "0.2.0" dependencies = [ "base64 0.12.3", + "directory", "dirs", "discv5", "environment", @@ -2567,6 +2581,7 @@ dependencies = [ "clap", "clap_utils", "deposit_contract", + "directory", "dirs", "environment", "eth2_keystore", @@ -2929,6 +2944,7 @@ dependencies = [ "boot_node", "clap", "clap_utils", + "directory", "env_logger", "environment", "eth2_testnet_config", @@ -5829,7 +5845,6 @@ dependencies = [ "compare_fields_derive", "criterion", "derivative", - "dirs", "eth2_hashing", "eth2_interop_keypairs", "eth2_ssz", @@ -6022,6 +6037,7 @@ dependencies = [ "clap", "clap_utils", "deposit_contract", + "directory", "dirs", "environment", "eth2_config", diff --git a/Cargo.toml b/Cargo.toml index 92fb5bccf39..82922f5a5f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ members = [ "common/compare_fields", "common/compare_fields_derive", "common/deposit_contract", + "common/directory", "common/eth2_config", "common/eth2_interop_keypairs", "common/eth2_testnet_config", diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 9a533aea28d..7127a2ddfc4 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -24,6 +24,7 @@ eth2_testnet_config = { path = "../common/eth2_testnet_config" } web3 = "0.11.0" futures = { version = "0.3.5", features = ["compat"] } clap_utils = { path = "../common/clap_utils" } +directory = 
{ path = "../common/directory" } eth2_wallet = { path = "../crypto/eth2_wallet" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } rand = "0.7.2" diff --git a/account_manager/src/common.rs b/account_manager/src/common.rs index 030092036b4..2b9c93fb1dc 100644 --- a/account_manager/src/common.rs +++ b/account_manager/src/common.rs @@ -1,10 +1,8 @@ use account_utils::PlainText; use account_utils::{read_input_from_user, strip_off_newlines}; -use clap::ArgMatches; use eth2_wallet::bip39::{Language, Mnemonic}; use std::fs; -use std::fs::create_dir_all; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::str::from_utf8; use std::thread::sleep; use std::time::Duration; @@ -12,26 +10,6 @@ use std::time::Duration; pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:"; -pub fn ensure_dir_exists>(path: P) -> Result<(), String> { - let path = path.as_ref(); - - if !path.exists() { - create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?; - } - - Ok(()) -} - -pub fn base_wallet_dir(matches: &ArgMatches, arg: &'static str) -> Result { - clap_utils::parse_path_with_default_in_home_dir( - matches, - arg, - PathBuf::new().join(".lighthouse").join("wallets"), - ) -} - -/// Reads in a mnemonic from the user. If the file path is provided, read from it. Otherwise, read -/// from an interactive prompt using tty, unless the `--stdin-inputs` flag is provided. 
pub fn read_mnemonic_from_cli( mnemonic_path: Option, stdin_inputs: bool, diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 5300693dcff..8297567781d 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -10,7 +10,7 @@ use types::EthSpec; pub const CMD: &str = "account_manager"; pub const SECRETS_DIR_FLAG: &str = "secrets-dir"; pub const VALIDATOR_DIR_FLAG: &str = "validator-dir"; -pub const BASE_DIR_FLAG: &str = "base-dir"; +pub const WALLETS_DIR_FLAG: &str = "wallets-dir"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 9489429786c..0d4566e4610 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -1,10 +1,13 @@ use crate::common::read_wallet_name_from_cli; use crate::wallet::create::STDIN_INPUTS_FLAG; -use crate::{common::ensure_dir_exists, SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG}; +use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; use account_utils::{ random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, }; use clap::{App, Arg, ArgMatches}; +use directory::{ + ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, +}; use environment::Environment; use eth2_wallet_manager::WalletManager; use std::ffi::OsStr; @@ -14,7 +17,6 @@ use types::EthSpec; use validator_dir::Builder as ValidatorDirBuilder; pub const CMD: &str = "create"; -pub const BASE_DIR_FLAG: &str = "base-dir"; pub const WALLET_NAME_FLAG: &str = "wallet-name"; pub const WALLET_PASSWORD_FLAG: &str = "wallet-password"; pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei"; @@ -44,14 +46,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true), ) .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path where the validator directories will be created. 
\ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), + Arg::with_name(WALLETS_DIR_FLAG) + .long(WALLETS_DIR_FLAG) + .value_name(WALLETS_DIR_FLAG) + .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{testnet}/wallets") + .takes_value(true) + .conflicts_with("datadir"), ) .arg( Arg::with_name(SECRETS_DIR_FLAG) @@ -59,8 +59,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ - Defaults to ~/.lighthouse/secrets", + Defaults to ~/.lighthouse/{testnet}/secrets", ) + .conflicts_with("datadir") .takes_value(true), ) .arg( @@ -111,23 +112,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run( matches: &ArgMatches, mut env: Environment, - wallet_base_dir: PathBuf, + validator_dir: PathBuf, ) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; let name: Option = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?; let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); + let wallet_base_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_WALLET_DIR) + } else { + parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? + }; + let secrets_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_SECRET_DIR) + } else { + parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)? + }; - let validator_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; - let secrets_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - SECRETS_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("secrets"), - )?; let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)? 
.unwrap_or_else(|| spec.max_effective_balance); let count: Option = clap_utils::parse_optional(matches, COUNT_FLAG)?; @@ -136,6 +139,9 @@ pub fn cli_run( ensure_dir_exists(&validator_dir)?; ensure_dir_exists(&secrets_dir)?; + eprintln!("secrets-dir path {:?}", secrets_dir); + eprintln!("wallets-dir path {:?}", wallet_base_dir); + let starting_validator_count = existing_validator_count(&validator_dir)?; let n = match (count, at_most) { @@ -166,7 +172,7 @@ pub fn cli_run( let wallet_password = read_wallet_password_from_cli(wallet_password_path, stdin_inputs)?; let mgr = WalletManager::open(&wallet_base_dir) - .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?; + .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let mut wallet = mgr .wallet_by_name(&wallet_name) diff --git a/account_manager/src/validator/deposit.rs b/account_manager/src/validator/deposit.rs index 0e508cfd2ed..233e7634e4a 100644 --- a/account_manager/src/validator/deposit.rs +++ b/account_manager/src/validator/deposit.rs @@ -46,16 +46,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { The deposit contract address will be determined by the --testnet-dir flag on the \ primary Lighthouse binary.", ) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path to the validator client data directory. 
\ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) .arg( Arg::with_name(VALIDATOR_FLAG) .long(VALIDATOR_FLAG) @@ -209,14 +199,10 @@ where pub fn cli_run( matches: &ArgMatches<'_>, mut env: Environment, + validator_dir: PathBuf, ) -> Result<(), String> { let log = env.core_context().log().clone(); - let data_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?; let eth1_ipc_path: Option = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?; let eth1_http_url: Option = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?; @@ -225,7 +211,7 @@ pub fn cli_run( let confirmation_batch_size: usize = clap_utils::parse_required(matches, CONFIRMATION_BATCH_SIZE_FLAG)?; - let manager = ValidatorManager::open(&data_dir) + let manager = ValidatorManager::open(&validator_dir) .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?; let validators = match validator.as_ref() { diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 5216b3d9c0d..1998709d283 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -1,5 +1,4 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; -use crate::{common::ensure_dir_exists, VALIDATOR_DIR_FLAG}; use account_utils::{ eth2_keystore::Keystore, read_password_from_user, @@ -55,16 +54,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .required_unless(KEYSTORE_FLAG) .takes_value(true), ) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path where the validator directories will be created. 
\ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) .arg( Arg::with_name(STDIN_INPUTS_FLAG) .long(STDIN_INPUTS_FLAG) @@ -77,19 +66,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { +pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { let keystore: Option = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?; let keystores_dir: Option = clap_utils::parse_optional(matches, DIR_FLAG)?; - let validator_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); - ensure_dir_exists(&validator_dir)?; - let mut defs = ValidatorDefinitions::open_or_create(&validator_dir) .map_err(|e| format!("Unable to open {}: {:?}", CONFIG_FILENAME, e))?; diff --git a/account_manager/src/validator/list.rs b/account_manager/src/validator/list.rs index 1485643039f..dd97de156da 100644 --- a/account_manager/src/validator/list.rs +++ b/account_manager/src/validator/list.rs @@ -1,38 +1,21 @@ use crate::VALIDATOR_DIR_FLAG; -use clap::{App, Arg, ArgMatches}; +use clap::App; use std::path::PathBuf; use validator_dir::Manager as ValidatorManager; pub const CMD: &str = "list"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path to search for validator directories. 
\ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) - .about("Lists the names of all validators.") + App::new(CMD).about("Lists the names of all validators.") } -pub fn cli_run(matches: &ArgMatches<'_>) -> Result<(), String> { - let data_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; - - let mgr = ValidatorManager::open(&data_dir) +pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> { + let mgr = ValidatorManager::open(&validator_dir) .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?; for (name, _path) in mgr .directory_names() - .map_err(|e| format!("Unable to list wallets: {:?}", e))? + .map_err(|e| format!("Unable to list validators: {:?}", e))? { println!("{}", name) } diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index 84ad6df3937..4c650dad087 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -4,9 +4,11 @@ pub mod import; pub mod list; pub mod recover; -use crate::common::base_wallet_dir; +use crate::VALIDATOR_DIR_FLAG; use clap::{App, Arg, ArgMatches}; +use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; +use std::path::PathBuf; use types::EthSpec; pub const CMD: &str = "validator"; @@ -15,11 +17,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .about("Provides commands for managing Eth2 validators.") .arg( - Arg::with_name("base-dir") - .long("base-dir") - .value_name("BASE_DIRECTORY") - .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets") - .takes_value(true), + Arg::with_name(VALIDATOR_DIR_FLAG) + .long(VALIDATOR_DIR_FLAG) + .value_name("VALIDATOR_DIRECTORY") + .help( + "The path to search for validator directories. 
\ + Defaults to ~/.lighthouse/{testnet}/validators", + ) + .takes_value(true) + .global(true) + .conflicts_with("datadir"), ) .subcommand(create::cli_app()) .subcommand(deposit::cli_app()) @@ -29,14 +36,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { - let base_wallet_dir = base_wallet_dir(matches, "base-dir")?; + let validator_base_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_VALIDATOR_DIR) + } else { + parse_path_or_default_with_flag(matches, VALIDATOR_DIR_FLAG, DEFAULT_VALIDATOR_DIR)? + }; + eprintln!("validator-dir path: {:?}", validator_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run::(matches, env, base_wallet_dir), - (deposit::CMD, Some(matches)) => deposit::cli_run::(matches, env), - (import::CMD, Some(matches)) => import::cli_run(matches), - (list::CMD, Some(matches)) => list::cli_run(matches), - (recover::CMD, Some(matches)) => recover::cli_run(matches), + (create::CMD, Some(matches)) => create::cli_run::(matches, env, validator_base_dir), + (deposit::CMD, Some(matches)) => deposit::cli_run::(matches, env, validator_base_dir), + (import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir), + (list::CMD, Some(_)) => list::cli_run(validator_base_dir), + (recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir), (unknown, _) => Err(format!( "{} does not have a {} command. 
See --help", CMD, unknown diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 376c21645ae..e3844d50028 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,11 +1,13 @@ use super::create::STORE_WITHDRAW_FLAG; -use crate::common::{ensure_dir_exists, read_mnemonic_from_cli}; +use crate::common::read_mnemonic_from_cli; use crate::validator::create::COUNT_FLAG; use crate::wallet::create::STDIN_INPUTS_FLAG; -use crate::{SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG}; +use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; use account_utils::random_password; use clap::{App, Arg, ArgMatches}; +use directory::ensure_dir_exists; +use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores}; use std::path::PathBuf; @@ -48,23 +50,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true) ) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path where the validator directories will be created. \ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) .arg( Arg::with_name(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. 
\ - Defaults to ~/.lighthouse/secrets", + Defaults to ~/.lighthouse/{testnet}/secrets", ) .takes_value(true), ) @@ -84,17 +76,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let validator_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; - let secrets_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - SECRETS_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("secrets"), - )?; +pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { + let secrets_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_SECRET_DIR) + } else { + parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)? + }; let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?; let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?; let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 04d141b48b4..a769cc019c1 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -1,5 +1,5 @@ use crate::common::read_wallet_name_from_cli; -use crate::BASE_DIR_FLAG; +use crate::WALLETS_DIR_FLAG; use account_utils::{ is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, }; @@ -80,7 +80,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { +pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { let mnemonic_output_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; // Create a new random mnemonic. 
@@ -88,7 +88,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { // The `tiny-bip39` crate uses `thread_rng()` for this entropy. let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let wallet = create_wallet_from_mnemonic(matches, &base_dir.as_path(), &mnemonic)?; + let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)?; if let Some(path) = mnemonic_output_path { create_with_600_perms(&path, mnemonic.phrase().as_bytes()) @@ -121,7 +121,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { pub fn create_wallet_from_mnemonic( matches: &ArgMatches, - base_dir: &Path, + wallet_base_dir: &Path, mnemonic: &Mnemonic, ) -> Result { let name: Option = clap_utils::parse_optional(matches, NAME_FLAG)?; @@ -134,8 +134,8 @@ pub fn create_wallet_from_mnemonic( unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), }; - let mgr = WalletManager::open(&base_dir) - .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?; + let mgr = WalletManager::open(&wallet_base_dir) + .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let wallet_password: PlainText = match wallet_password_path { Some(path) => { diff --git a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 85096dc5ff4..5b671b1dcec 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -1,4 +1,4 @@ -use crate::BASE_DIR_FLAG; +use crate::WALLETS_DIR_FLAG; use clap::App; use eth2_wallet_manager::WalletManager; use std::path::PathBuf; @@ -9,9 +9,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD).about("Lists the names of all wallets.") } -pub fn cli_run(base_dir: PathBuf) -> Result<(), String> { - let mgr = WalletManager::open(&base_dir) - .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?; +pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { + let 
mgr = WalletManager::open(&wallet_base_dir) + .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; for (name, _uuid) in mgr .wallets() diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index e8315b77a3d..d745cbcd2ce 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -2,11 +2,10 @@ pub mod create; pub mod list; pub mod recover; -use crate::{ - common::{base_wallet_dir, ensure_dir_exists}, - BASE_DIR_FLAG, -}; +use crate::WALLETS_DIR_FLAG; use clap::{App, Arg, ArgMatches}; +use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use std::path::PathBuf; pub const CMD: &str = "wallet"; @@ -14,11 +13,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .about("Manage wallets, from which validator keys can be derived.") .arg( - Arg::with_name(BASE_DIR_FLAG) - .long(BASE_DIR_FLAG) - .value_name("BASE_DIRECTORY") - .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets") - .takes_value(true), + Arg::with_name(WALLETS_DIR_FLAG) + .long(WALLETS_DIR_FLAG) + .value_name("WALLETS_DIRECTORY") + .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{testnet}/wallets") + .takes_value(true) + .global(true) + .conflicts_with("datadir"), ) .subcommand(create::cli_app()) .subcommand(list::cli_app()) @@ -26,13 +27,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let base_dir = base_wallet_dir(matches, BASE_DIR_FLAG)?; - ensure_dir_exists(&base_dir)?; + let wallet_base_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_WALLET_DIR) + } else { + parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? 
+ }; + ensure_dir_exists(&wallet_base_dir)?; + + eprintln!("wallet-dir path: {:?}", wallet_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run(matches, base_dir), - (list::CMD, Some(_)) => list::cli_run(base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, base_dir), + (create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir), + (list::CMD, Some(_)) => list::cli_run(wallet_base_dir), + (recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir), (unknown, _) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 0351b1cb4b3..deb965af187 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -30,6 +30,7 @@ tokio = { version = "0.2.21", features = ["time"] } exit-future = "0.2.0" dirs = "2.0.2" logging = { path = "../common/logging" } +directory = {path = "../common/directory"} futures = "0.3.5" environment = { path = "../lighthouse/environment" } genesis = { path = "genesis" } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index de6f7e59d76..ba98eb946d4 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -41,3 +41,4 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } time = "0.2.16" bus = "2.2.3" +directory = {path = "../../common/directory"} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 19088e785b5..fdcd3d6e819 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,11 +1,10 @@ +use directory::DEFAULT_ROOT_DIR; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use types::Graffiti; -pub const DEFAULT_DATADIR: &str = ".lighthouse"; - /// The number initial validators when starting the `Minimal`. 
const TESTNET_SPEC_CONSTANTS: &str = "minimal"; @@ -72,7 +71,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Self { - data_dir: PathBuf::from(DEFAULT_DATADIR), + data_dir: PathBuf::from(DEFAULT_ROOT_DIR), db_name: "chain_db".to_string(), freezer_db_path: None, log_file: PathBuf::from(""), diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index 2df5123b55b..de916f8fa61 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -36,6 +36,7 @@ discv5 = { version = "0.1.0-alpha.12", features = ["libp2p"] } tiny-keccak = "2.0.2" environment = { path = "../../lighthouse/environment" } rand = "0.7.3" +directory = { path = "../../common/directory" } regex = "1.3.9" [dependencies.libp2p] diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index 73094642d77..11bb0d36271 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -1,5 +1,8 @@ use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; +use directory::{ + DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_TESTNET, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, +}; use discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub::{ GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, ValidationMode, @@ -74,9 +77,14 @@ pub struct Config { impl Default for Config { /// Generate a default network configuration. fn default() -> Self { - let mut network_dir = dirs::home_dir().unwrap_or_else(|| PathBuf::from(".")); - network_dir.push(".lighthouse"); - network_dir.push("network"); + // WARNING: this directory default should always be overridden with parameters + // from cli for specific networks. 
+ let network_dir = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_TESTNET) + .join(DEFAULT_BEACON_NODE_DIR) + .join(DEFAULT_NETWORK_DIR); // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(data) for content addressing diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 42b3b8277da..aabdbb35ca4 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,7 +1,8 @@ use beacon_chain::builder::PUBKEY_CACHE_FILENAME; use clap::ArgMatches; use clap_utils::BAD_TESTNET_DIR_MESSAGE; -use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis}; +use client::{ClientConfig, ClientGenesis}; +use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use eth2_testnet_config::Eth2TestnetConfig; use slog::{crit, info, warn, Logger}; @@ -13,9 +14,6 @@ use std::net::{TcpListener, UdpSocket}; use std::path::PathBuf; use types::{ChainSpec, EthSpec, GRAFFITI_BYTES_LEN}; -pub const BEACON_NODE_DIR: &str = "beacon"; -pub const NETWORK_DIR: &str = "network"; - /// Gets the fully-initialized global client. /// /// The top-level `clap` arguments should be provided as `cli_args`. @@ -295,7 +293,7 @@ pub fn set_network_config( if let Some(dir) = cli_args.value_of("network-dir") { config.network_dir = PathBuf::from(dir); } else { - config.network_dir = data_dir.join(NETWORK_DIR); + config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; if let Some(listen_address_str) = cli_args.value_of("listen-address") { @@ -456,11 +454,18 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { // Read the `--datadir` flag. // // If it's not present, try and find the home directory (`~`) and push the default data - // directory onto it. + // directory and the testnet name onto it. 
+ cli_args .value_of("datadir") - .map(|path| PathBuf::from(path).join(BEACON_NODE_DIR)) - .or_else(|| dirs::home_dir().map(|home| home.join(DEFAULT_DATADIR).join(BEACON_NODE_DIR))) + .map(|path| PathBuf::from(path).join(DEFAULT_BEACON_NODE_DIR)) + .or_else(|| { + dirs::home_dir().map(|home| { + home.join(DEFAULT_ROOT_DIR) + .join(directory::get_testnet_name(cli_args)) + .join(DEFAULT_BEACON_NODE_DIR) + }) + }) .unwrap_or_else(|| PathBuf::from(".")) } diff --git a/book/src/key-management.md b/book/src/key-management.md index 53edec221d9..4b03bec0ec2 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -40,12 +40,12 @@ keypairs. Creating a single validator looks like this: - `lighthouse account validator create --wallet-name wally --wallet-password wally.pass --count 1` -In step (1), we created a wallet in `~/.lighthouse/wallets` with the name +In step (1), we created a wallet in `~/.lighthouse/{testnet}/wallets` with the name `wally`. We encrypted this using a pre-defined password in the `wally.pass` file. Then, in step (2), we created one new validator in the -`~/.lighthouse/validators` directory using `wally` (unlocking it with +`~/.lighthouse/{testnet}/validators` directory using `wally` (unlocking it with `wally.pass`) and storing the passwords to the validators voting key in -`~/.lighthouse/secrets`. +`~/.lighthouse/{testnet}/secrets`. Thanks to the hierarchical key derivation scheme, we can delete all of the aforementioned directories and then regenerate them as long as we remembered @@ -63,14 +63,16 @@ There are three important directories in Lighthouse validator key management: - `wallets/`: contains encrypted wallets which are used for hierarchical key derivation. - - Defaults to `~/.lighthouse/wallets` + - Defaults to `~/.lighthouse/{testnet}/wallets` - `validators/`: contains a directory for each validator containing encrypted keystores and other validator-specific data. 
- - Defaults to `~/.lighthouse/validators` + - Defaults to `~/.lighthouse/{testnet}/validators` - `secrets/`: since the validator signing keys are "hot", the validator process needs access to the passwords to decrypt the keystores in the validators dir. These passwords are stored here. - - Defaults to `~/.lighthouse/secrets` + - Defaults to `~/.lighthouse/{testnet}/secrets` + +where `testnet` is the name of the testnet passed in the `--testnet` parameter (default is `medalla`). When the validator client boots, it searches the `validators/` for directories containing voting keystores. When it discovers a keystore, it searches the diff --git a/book/src/validator-create.md b/book/src/validator-create.md index 25112e74872..9d73cdf802e 100644 --- a/book/src/validator-create.md +++ b/book/src/validator-create.md @@ -41,7 +41,7 @@ OPTIONS: The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator (MAX_EFFECTIVE_BALANCE) --secrets-dir - The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/secrets + The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/{testnet}/secrets -s, --spec Specifies the default eth2 spec type. [default: mainnet] [possible values: mainnet, minimal, interop] @@ -53,7 +53,7 @@ OPTIONS: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. --validator-dir <VALIDATOR_DIRECTORY> - The path where the validator directories will be created. Defaults to ~/.lighthouse/validators + The path where the validator directories will be created. Defaults to ~/.lighthouse/{testnet}/validators --wallet-name <WALLET_NAME> Use the wallet identified by this name --wallet-password <WALLET_PASSWORD_PATH> @@ -73,10 +73,12 @@ This command will: - Derive a single new BLS keypair from `wally`, updating it so that it generates a new key next time. 
-- Create a new directory in `~/.lighthouse/validators` containing: +- Create a new directory in `~/.lighthouse/{testnet}/validators` containing: - An encrypted keystore containing the validators voting keypair. - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH` for most testnets and mainnet) which can be submitted to the deposit contract for the medalla testnet. Other testnets can be set via the `--testnet` CLI param. -- Store a password to the validators voting keypair in `~/.lighthouse/secrets`. +- Store a password to the validators voting keypair in `~/.lighthouse/{testnet}/secrets`. + +where `testnet` is the name of the testnet passed in the `--testnet` parameter (default is `medalla`). \ No newline at end of file diff --git a/book/src/validator-management.md b/book/src/validator-management.md index fbb76c9b4e9..df0e7243d0a 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -16,7 +16,7 @@ useful. ## Introducing the `validator_definitions.yml` file The `validator_definitions.yml` file is located in the `validator-dir`, which -defaults to `~/.lighthouse/validators`. It is a +defaults to `~/.lighthouse/{testnet}/validators`. It is a [YAML](https://en.wikipedia.org/wiki/YAML) encoded file defining exactly which validators the validator client will (and won't) act for. @@ -92,7 +92,7 @@ name identical to the `voting_public_key` value. 
Lets assume the following directory structure: ``` -~/.lighthouse/validators +~/.lighthouse/{testnet}/validators ├── john │   └── voting-keystore.json ├── sally @@ -135,7 +135,7 @@ In order for the validator client to decrypt the validators, they will need to ensure their `secrets-dir` is organised as below: ``` -~/.lighthouse/secrets +~/.lighthouse/{testnet}/secrets ├── 0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477 ├── 0xaa440c566fcf34dedf233baf56cf5fb05bb420d9663b4208272545608c27c13d5b08174518c758ecd814f158f2b4a337 └── 0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 diff --git a/common/directory/Cargo.toml b/common/directory/Cargo.toml new file mode 100644 index 00000000000..ebea5f3dc37 --- /dev/null +++ b/common/directory/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "directory" +version = "0.1.0" +authors = ["pawan <pawandhananjay@gmail.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.0" +clap_utils = {path = "../clap_utils"} +dirs = "2.0.2" +eth2_testnet_config = { path = "../eth2_testnet_config" } diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs new file mode 100644 index 00000000000..765fdabd621 --- /dev/null +++ b/common/directory/src/lib.rs @@ -0,0 +1,60 @@ +use clap::ArgMatches; +pub use eth2_testnet_config::DEFAULT_HARDCODED_TESTNET; +use std::fs::create_dir_all; +use std::path::{Path, PathBuf}; + +/// Names for the default directories. 
+pub const DEFAULT_ROOT_DIR: &str = ".lighthouse"; +pub const DEFAULT_BEACON_NODE_DIR: &str = "beacon"; +pub const DEFAULT_NETWORK_DIR: &str = "network"; +pub const DEFAULT_VALIDATOR_DIR: &str = "validators"; +pub const DEFAULT_SECRET_DIR: &str = "secrets"; +pub const DEFAULT_WALLET_DIR: &str = "wallets"; + +/// Base directory name for unnamed testnets passed through the --testnet-dir flag +pub const CUSTOM_TESTNET_DIR: &str = "custom"; + +/// Gets the testnet directory name +/// +/// Tries to get the name first from the "testnet" flag, +/// if not present, then checks the "testnet-dir" flag and returns a custom name +/// If neither flags are present, returns the default hardcoded network name. +pub fn get_testnet_name(matches: &ArgMatches) -> String { + if let Some(testnet_name) = matches.value_of("testnet") { + testnet_name.to_string() + } else if matches.value_of("testnet-dir").is_some() { + CUSTOM_TESTNET_DIR.to_string() + } else { + eth2_testnet_config::DEFAULT_HARDCODED_TESTNET.to_string() + } +} + +/// Checks if a directory exists in the given path and creates a directory if it does not exist. +pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> { + let path = path.as_ref(); + + if !path.exists() { + create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?; + } + + Ok(()) +} + +/// If `arg` is in `matches`, parses the value as a path. +/// +/// Otherwise, attempts to find the default directory for the `testnet` from the `matches` +/// and appends `flag` to it. 
+pub fn parse_path_or_default_with_flag( + matches: &ArgMatches, + arg: &'static str, + flag: &str, +) -> Result<PathBuf, String> { + clap_utils::parse_path_with_default_in_home_dir( + matches, + arg, + PathBuf::new() + .join(DEFAULT_ROOT_DIR) + .join(get_testnet_name(matches)) + .join(flag), + ) +} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 8f6fed4b4ae..80b4007b973 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -12,7 +12,6 @@ harness = false bls = { path = "../../crypto/bls" } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } -dirs = "2.0.2" eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum-types = "0.9.1" eth2_hashing = "0.1.0" diff --git a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs index 67a3dae2665..922d4017fea 100644 --- a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -4,21 +4,9 @@ use crate::*; use bls::get_withdrawal_credentials; use log::debug; use rayon::prelude::*; -use std::path::PathBuf; pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; -/// Returns the directory where the generated keypairs should be stored. -/// -/// It is either `$HOME/.lighthouse/keypairs.raw_keypairs` or, if `$HOME` is not available, -/// `./keypairs.raw_keypairs`. -pub fn keypairs_path() -> PathBuf { - let dir = dirs::home_dir() - .map(|home| (home.join(".lighthouse"))) - .unwrap_or_else(|| PathBuf::from("")); - dir.join(KEYPAIRS_FILE) -} - /// Builds a beacon state to be used for testing purposes. 
/// /// This struct should **never be used for production purposes.** diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index c2c2c09b89e..8872922bb6e 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -35,3 +35,4 @@ validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] rand = "0.7.2" eth2_keystore = { path = "../crypto/eth2_keystore" } lighthouse_version = { path = "../common/lighthouse_version" } +directory = { path = "../common/directory" } diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 2c6f7d8cffc..9fd0757d81d 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -20,7 +20,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res .and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ())) .unwrap_or_else(|_| { dirs::home_dir() - .map(|home| home.join(".lighthouse").join("testnet")) + .map(|home| home.join(directory::DEFAULT_ROOT_DIR).join("testnet")) .expect("should locate home directory") }); diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 9c8609b5c3d..28cd2625b07 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -31,7 +31,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result< .and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ())) .unwrap_or_else(|_| { dirs::home_dir() - .map(|home| home.join(".lighthouse").join("testnet")) + .map(|home| home.join(directory::DEFAULT_ROOT_DIR).join("testnet")) .expect("should locate home directory") }); diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 918426e74d3..fc60e8c98db 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -10,7 +10,7 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { let testnet_dir_path = parse_path_with_default_in_home_dir( matches, "testnet-dir", - PathBuf::from(".lighthouse/testnet"), + PathBuf::from(directory::DEFAULT_ROOT_DIR).join("testnet"), )?; let 
deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3bc232d9fa4..1daf5f97c9b 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -31,9 +31,10 @@ validator_client = { "path" = "../validator_client" } account_manager = { "path" = "../account_manager" } clap_utils = { path = "../common/clap_utils" } eth2_testnet_config = { path = "../common/eth2_testnet_config" } +directory = { path = "../common/directory" } lighthouse_version = { path = "../common/lighthouse_version" } +account_utils = { path = "../common/account_utils" } [dev-dependencies] tempfile = "3.1.0" validator_dir = { path = "../common/validator_dir" } -account_utils = { path = "../common/account_utils" } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index c174992e0e6..9d13706a1c1 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -10,7 +10,6 @@ use std::process::exit; use types::EthSpec; use validator_client::ProductionValidatorClient; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; fn bls_library_name() -> &'static str { @@ -91,7 +90,10 @@ fn main() { .short("d") .value_name("DIR") .global(true) - .help("Data directory for lighthouse keys and databases.") + .help( + "Root data directory for lighthouse keys and databases. 
\ + Defaults to $HOME/.lighthouse/{default-testnet}, \ + currently, $HOME/.lighthouse/medalla") .takes_value(true), ) .arg( diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index f5c47303414..30f885b4e8b 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -11,7 +11,7 @@ use account_manager::{ list::CMD as LIST_CMD, CMD as WALLET_CMD, }, - BASE_DIR_FLAG, CMD as ACCOUNT_CMD, *, + CMD as ACCOUNT_CMD, WALLETS_DIR_FLAG, *, }; use account_utils::{ eth2_keystore::KeystoreBuilder, @@ -73,7 +73,7 @@ fn dir_child_count<P: AsRef<Path>>(dir: P) -> usize { fn list_wallets<P: AsRef<Path>>(base_dir: P) -> Vec<String> { let output = output_result( wallet_cmd() - .arg(format!("--{}", BASE_DIR_FLAG)) + .arg(format!("--{}", WALLETS_DIR_FLAG)) .arg(base_dir.as_ref().as_os_str()) .arg(LIST_CMD), ) @@ -97,7 +97,7 @@ fn create_wallet<P: AsRef<Path>>( ) -> Result<Output, String> { output_result( wallet_cmd() - .arg(format!("--{}", BASE_DIR_FLAG)) + .arg(format!("--{}", WALLETS_DIR_FLAG)) .arg(base_dir.as_ref().as_os_str()) .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) @@ -233,15 +233,15 @@ impl TestValidator { store_withdrawal_key: bool, ) -> Result<Vec<String>, String> { let mut cmd = validator_cmd(); - cmd.arg(format!("--{}", BASE_DIR_FLAG)) - .arg(self.wallet.base_dir().into_os_string()) + cmd.arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(self.validator_dir.clone().into_os_string()) .arg(CREATE_CMD) + .arg(format!("--{}", WALLETS_DIR_FLAG)) + .arg(self.wallet.base_dir().into_os_string()) .arg(format!("--{}", WALLET_NAME_FLAG)) .arg(&self.wallet.name) .arg(format!("--{}", WALLET_PASSWORD_FLAG)) .arg(self.wallet.password_path().into_os_string()) - .arg(format!("--{}", VALIDATOR_DIR_FLAG)) - .arg(self.validator_dir.clone().into_os_string()) .arg(format!("--{}", SECRETS_DIR_FLAG)) .arg(self.secrets_dir.clone().into_os_string()) .arg(format!("--{}", DEPOSIT_GWEI_FLAG)) @@ -375,13 +375,6 @@ fn 
validator_create() { assert_eq!(dir_child_count(validator_dir.path()), 6); } -/// Returns the `lighthouse account validator import` command. -fn validator_import_cmd() -> Command { - let mut cmd = validator_cmd(); - cmd.arg(IMPORT_CMD); - cmd -} - #[test] fn validator_import_launchpad() { const PASSWORD: &str = "cats"; @@ -407,12 +400,13 @@ fn validator_import_launchpad() { // Create a not-keystore file in the src dir. File::create(src_dir.path().join(NOT_KEYSTORE_NAME)).unwrap(); - let mut child = validator_import_cmd() + let mut child = validator_cmd() + .arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(dst_dir.path().as_os_str()) + .arg(IMPORT_CMD) .arg(format!("--{}", STDIN_INPUTS_FLAG)) // Using tty does not work well with tests. .arg(format!("--{}", import::DIR_FLAG)) .arg(src_dir.path().as_os_str()) - .arg(format!("--{}", VALIDATOR_DIR_FLAG)) - .arg(dst_dir.path().as_os_str()) .stderr(Stdio::piped()) .stdin(Stdio::piped()) .spawn() diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 9459a07b5b5..b1a74b64a73 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -96,7 +96,7 @@ pub fn testing_client_config() -> ClientConfig { /// This struct is separate to `LocalValidatorClient` to allow for pre-computation of validator /// keypairs since the task is quite resource intensive. 
pub struct ValidatorFiles { - pub datadir: TempDir, + pub validator_dir: TempDir, pub secrets_dir: TempDir, } @@ -110,7 +110,7 @@ impl ValidatorFiles { .map_err(|e| format!("Unable to create VC secrets dir: {:?}", e))?; Ok(Self { - datadir, + validator_dir: datadir, secrets_dir, }) } @@ -120,7 +120,7 @@ impl ValidatorFiles { let this = Self::new()?; build_deterministic_validator_dirs( - this.datadir.path().into(), + this.validator_dir.path().into(), this.secrets_dir.path().into(), keypair_indices, ) @@ -170,7 +170,7 @@ impl<E: EthSpec> LocalValidatorClient<E> { mut config: ValidatorConfig, files: ValidatorFiles, ) -> Result<Self, String> { - config.data_dir = files.datadir.path().into(); + config.validator_dir = files.validator_dir.path().into(); config.secrets_dir = files.secrets_dir.path().into(); ProductionValidatorClient::new(context, config) diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 1bbde0c5adb..77a6e5ce97e 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -31,6 +31,7 @@ slog-term = "2.5.0" tokio = { version = "0.2.21", features = ["time"] } futures = { version = "0.3.5", features = ["compat"] } dirs = "2.0.2" +directory = {path = "../common/directory"} logging = { path = "../common/logging" } environment = { path = "../lighthouse/environment" } parking_lot = "0.11.0" diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 7ac483439ce..9ad0c3faa1d 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -16,6 +16,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value(&DEFAULT_HTTP_SERVER) .takes_value(true), ) + .arg( + Arg::with_name("validators-dir") + .long("validators-dir") + .value_name("VALIDATORS_DIR") + .help( + "The directory which contains the validator keystores, deposit data for \ + each validator along with the common slashing protection database \ + and the validator_definitions.yml" + ) + .takes_value(true) + 
.conflicts_with("datadir") + .requires("secrets-dir") + ) .arg( Arg::with_name("secrets-dir") .long("secrets-dir") @@ -24,9 +37,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The directory which contains the password to unlock the validator \ voting keypairs. Each password should be contained in a file where the \ name is the 0x-prefixed hex representation of the validators voting public \ - key. Defaults to ~/.lighthouse/secrets.", + key. Defaults to ~/.lighthouse/{testnet}/secrets.", ) - .takes_value(true), + .takes_value(true) + .conflicts_with("datadir") + .requires("validators-dir"), ) .arg(Arg::with_name("auto-register").long("auto-register").help( "If present, the validator client will register any new signing keys with \ @@ -48,6 +63,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { that might also be using the same keystores." ) ) + .arg( + Arg::with_name("strict-slashing-protection") + .long("strict-slashing-protection") + .help( + "If present, do not create a new slashing database. This is to ensure that users \ + do not accidentally get slashed in case their slashing protection db ends up in the \ + wrong directory during directory restructure and vc creates a new empty db and \ + re-registers all validators." 
+ ) + ) .arg( Arg::with_name("disable-auto-discover") .long("disable-auto-discover") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 4a11c5aecdc..991b5516220 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,12 +1,14 @@ use clap::ArgMatches; -use clap_utils::{parse_optional, parse_path_with_default_in_home_dir}; +use clap_utils::{parse_optional, parse_required}; +use directory::{ + get_testnet_name, DEFAULT_HARDCODED_TESTNET, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, + DEFAULT_VALIDATOR_DIR, +}; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use types::{Graffiti, GRAFFITI_BYTES_LEN}; pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/"; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse/validators"; -pub const DEFAULT_SECRETS_DIR: &str = ".lighthouse/secrets"; /// Path to the slashing protection database within the datadir. pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; @@ -14,7 +16,7 @@ pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; #[derive(Clone, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases - pub data_dir: PathBuf, + pub validator_dir: PathBuf, /// The directory containing the passwords to unlock validator keystores. pub secrets_dir: PathBuf, /// The http endpoint of the beacon node API. @@ -28,6 +30,8 @@ pub struct Config { pub delete_lockfiles: bool, /// If true, don't scan the validators dir for new keystores. pub disable_auto_discover: bool, + /// If true, don't re-register existing validators in definitions.yml for slashing protection. + pub strict_slashing_protection: bool, /// Graffiti to be inserted everytime we create a block. pub graffiti: Option<Graffiti>, } @@ -35,19 +39,22 @@ pub struct Config { impl Default for Config { /// Build a new configuration from defaults. 
fn default() -> Self { - let data_dir = dirs::home_dir() - .map(|home| home.join(DEFAULT_DATA_DIR)) - .unwrap_or_else(|| PathBuf::from(".")); - let secrets_dir = dirs::home_dir() - .map(|home| home.join(DEFAULT_SECRETS_DIR)) - .unwrap_or_else(|| PathBuf::from(".")); + // WARNING: these directory defaults should always be overridden with parameters + // from cli for specific networks. + let base_dir = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_TESTNET); + let validator_dir = base_dir.join(DEFAULT_VALIDATOR_DIR); + let secrets_dir = base_dir.join(DEFAULT_SECRET_DIR); Self { - data_dir, + validator_dir, secrets_dir, http_server: DEFAULT_HTTP_SERVER.to_string(), allow_unsynced_beacon_node: false, delete_lockfiles: false, disable_auto_discover: false, + strict_slashing_protection: false, graffiti: None, } } @@ -59,16 +66,39 @@ impl Config { pub fn from_cli(cli_args: &ArgMatches) -> Result<Config, String> { let mut config = Config::default(); - config.data_dir = parse_path_with_default_in_home_dir( - cli_args, - "datadir", - PathBuf::from(".lighthouse").join("validators"), - )?; + let default_root_dir = dirs::home_dir() + .map(|home| home.join(DEFAULT_ROOT_DIR)) + .unwrap_or_else(|| PathBuf::from(".")); + + let (mut validator_dir, mut secrets_dir) = (None, None); + if cli_args.value_of("datadir").is_some() { + let base_dir: PathBuf = parse_required(cli_args, "datadir")?; + validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); + secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); + } + if cli_args.value_of("validators-dir").is_some() + && cli_args.value_of("secrets-dir").is_some() + { + validator_dir = Some(parse_required(cli_args, "validators-dir")?); + secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); + } + + config.validator_dir = validator_dir.unwrap_or_else(|| { + default_root_dir + .join(get_testnet_name(cli_args)) + .join(DEFAULT_VALIDATOR_DIR) + }); + + config.secrets_dir = 
secrets_dir.unwrap_or_else(|| { + default_root_dir + .join(get_testnet_name(cli_args)) + .join(DEFAULT_SECRET_DIR) + }); - if !config.data_dir.exists() { + if !config.validator_dir.exists() { return Err(format!( - "The directory for validator data (--datadir) does not exist: {:?}", - config.data_dir + "The directory for validator data does not exist: {:?}", + config.validator_dir )); } @@ -79,10 +109,7 @@ impl Config { config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); config.delete_lockfiles = cli_args.is_present("delete-lockfiles"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); - - if let Some(secrets_dir) = parse_optional(cli_args, "secrets-dir")? { - config.secrets_dir = secrets_dir; - } + config.strict_slashing_protection = cli_args.is_present("strict-slashing-protection"); if let Some(input_graffiti) = cli_args.value_of("graffiti") { let graffiti_bytes = input_graffiti.as_bytes(); diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6b709023faf..6d82baa6bfb 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -68,18 +68,18 @@ impl<T: EthSpec> ProductionValidatorClient<T> { log, "Starting validator client"; "beacon_node" => &config.http_server, - "datadir" => format!("{:?}", config.data_dir), + "validator_dir" => format!("{:?}", config.validator_dir), ); - let mut validator_defs = ValidatorDefinitions::open_or_create(&config.data_dir) + let mut validator_defs = ValidatorDefinitions::open_or_create(&config.validator_dir) .map_err(|e| format!("Unable to open or create validator definitions: {:?}", e))?; if !config.disable_auto_discover { let new_validators = validator_defs - .discover_local_keystores(&config.data_dir, &config.secrets_dir, &log) + .discover_local_keystores(&config.validator_dir, &config.secrets_dir, &log) .map_err(|e| format!("Unable to discover local validator keystores: {:?}", e))?; validator_defs - .save(&config.data_dir) + 
.save(&config.validator_dir) .map_err(|e| format!("Unable to update validator definitions: {:?}", e))?; info!( log, @@ -90,7 +90,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { let validators = InitializedValidators::from_definitions( validator_defs, - config.data_dir.clone(), + config.validator_dir.clone(), config.delete_lockfiles, log.clone(), ) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index f7d0442d376..66a616ff336 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -62,14 +62,24 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { fork_service: ForkService<T, E>, log: Logger, ) -> Result<Self, String> { - let slashing_db_path = config.data_dir.join(SLASHING_PROTECTION_FILENAME); - let slashing_protection = + let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_protection = if config.strict_slashing_protection { + // Don't create a new slashing database if `strict_slashing_protection` is turned on. + SlashingDatabase::open(&slashing_db_path).map_err(|e| { + format!( + "Failed to open slashing protection database: {:?}. + Ensure that `slashing_protection.sqlite` is in {:?} folder", + e, config.validator_dir + ) + })? + } else { SlashingDatabase::open_or_create(&slashing_db_path).map_err(|e| { format!( "Failed to open or create slashing protection database: {:?}", e ) - })?; + })? + }; Ok(Self { validators: Arc::new(RwLock::new(validators)), From 29709c598260df9d3953c2a8a6db4074ad17c58d Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 29 Sep 2020 03:46:54 +0000 Subject: [PATCH 04/33] Implement standard eth2.0 API (#1569) - Resolves #1550 - Resolves #824 - Resolves #825 - Resolves #1131 - Resolves #1411 - Resolves #1256 - Resolve #1177 - Includes the `ShufflingId` struct initially defined in #1492. 
That PR is now closed and the changes are included here, with significant bug fixes. - Implement the https://github.com/ethereum/eth2.0-APIs in a new `http_api` crate using `warp`. This replaces the `rest_api` crate. - Add a new `common/eth2` crate which provides a wrapper around `reqwest`, providing the HTTP client that is used by the validator client and for testing. This replaces the `common/remote_beacon_node` crate. - Create a `http_metrics` crate which is a dedicated server for Prometheus metrics (they are no longer served on the same port as the REST API). We now have flags for `--metrics`, `--metrics-address`, etc. - Allow the `subnet_id` to be an optional parameter for `VerifiedUnaggregatedAttestation::verify`. This means it does not need to be provided unnecessarily by the validator client. - Move `fn map_attestation_committee` in `mod beacon_chain::attestation_verification` to a new `fn with_committee_cache` on the `BeaconChain` so the same cache can be used for obtaining validator duties. - Add some other helpers to `BeaconChain` to assist with common API duties (e.g., `block_root_at_slot`, `head_beacon_block_root`). - Change the `NaiveAggregationPool` so it can index attestations by `hash_tree_root(attestation.data)`. This is a requirement of the API. - Add functions to `BeaconChainHarness` to allow it to create slashings and exits. - Allow for `eth1::Eth1NetworkId` to go to/from a `String`. - Add functions to the `OperationPool` to allow getting all objects in the pool. - Add function to `BeaconState` to check if a committee cache is initialized. - Fix bug where `seconds_per_eth1_block` was not transferring over from `YamlConfig` to `ChainSpec`. - Add the `deposit_contract_address` to `YamlConfig` and `ChainSpec`. We needed to be able to return it in an API response. - Change some uses of serde `serialize_with` and `deserialize_with` to a single use of `with` (code quality). - Impl `Display` and `FromStr` for several BLS fields. 
- Check for clock discrepancy when VC polls BN for sync state (with +/- 1 slot tolerance). This is not intended to be comprehensive, it was just easy to do. - See #1434 for a per-endpoint overview. - Seeking clarity here: https://github.com/ethereum/eth2.0-APIs/issues/75 - [x] Add docs for prom port to close #1256 - [x] Follow up on this #1177 - [x] ~~Follow up with #1424~~ Will fix in future PR. - [x] Follow up with #1411 - [x] ~~Follow up with #1260~~ Will fix in future PR. - [x] Add quotes to all integers. - [x] Remove `rest_types` - [x] Address missing beacon block error. (#1629) - [x] ~~Add tests for lighthouse/peers endpoints~~ Wontfix - [x] ~~Follow up with validator status proposal~~ Tracked in #1434 - [x] Unify graffiti structs - [x] ~~Start server when waiting for genesis?~~ Will fix in future PR. - [x] TODO in http_api tests - [x] Move lighthouse endpoints off /eth/v1 - [x] Update docs to link to standard - ~~Blocked on #1586~~ Co-authored-by: Michael Sproul <michael@sigmaprime.io> --- Cargo.lock | 452 +++-- Cargo.toml | 8 +- beacon_node/beacon_chain/Cargo.toml | 1 - .../src/attestation_verification.rs | 163 +- beacon_node/beacon_chain/src/beacon_chain.rs | 284 ++- beacon_node/beacon_chain/src/builder.rs | 10 +- beacon_node/beacon_chain/src/errors.rs | 4 + .../src/naive_aggregation_pool.rs | 47 +- .../beacon_chain/src/shuffling_cache.rs | 38 +- beacon_node/beacon_chain/src/test_utils.rs | 114 +- .../tests/attestation_verification.rs | 6 +- beacon_node/beacon_chain/tests/store_tests.rs | 2 +- beacon_node/beacon_chain/tests/tests.rs | 2 +- beacon_node/client/Cargo.toml | 3 +- beacon_node/client/src/builder.rs | 159 +- beacon_node/client/src/config.rs | 6 +- beacon_node/client/src/lib.rs | 16 +- beacon_node/eth1/src/http.rs | 33 +- beacon_node/eth1/src/lib.rs | 4 +- beacon_node/{rest_api => http_api}/Cargo.toml | 58 +- .../http_api/src/beacon_proposer_cache.rs | 185 ++ beacon_node/http_api/src/block_id.rs | 87 + beacon_node/http_api/src/lib.rs | 1749 
++++++++++++++++ beacon_node/http_api/src/metrics.rs | 32 + beacon_node/http_api/src/state_id.rs | 118 ++ .../http_api/src/validator_inclusion.rs | 88 + beacon_node/http_api/tests/tests.rs | 1786 +++++++++++++++++ beacon_node/http_metrics/Cargo.toml | 28 + beacon_node/http_metrics/src/lib.rs | 135 ++ .../{rest_api => http_metrics}/src/metrics.rs | 59 +- beacon_node/http_metrics/tests/tests.rs | 46 + beacon_node/network/Cargo.toml | 1 - .../network/src/attestation_service/mod.rs | 3 +- .../network/src/beacon_processor/worker.rs | 2 +- beacon_node/network/src/service.rs | 3 +- beacon_node/operation_pool/src/lib.rs | 45 + beacon_node/rest_api/src/beacon.rs | 499 ----- beacon_node/rest_api/src/config.rs | 55 - beacon_node/rest_api/src/consensus.rs | 126 -- beacon_node/rest_api/src/helpers.rs | 260 --- beacon_node/rest_api/src/lib.rs | 127 -- beacon_node/rest_api/src/lighthouse.rs | 48 - beacon_node/rest_api/src/node.rs | 39 - beacon_node/rest_api/src/router.rs | 322 --- beacon_node/rest_api/src/url_query.rs | 166 -- beacon_node/rest_api/src/validator.rs | 747 ------- beacon_node/rest_api/tests/test.rs | 1345 ------------- beacon_node/src/cli.rs | 34 +- beacon_node/src/config.rs | 53 +- beacon_node/src/lib.rs | 19 +- beacon_node/tests/test.rs | 9 +- book/src/SUMMARY.md | 19 +- book/src/advanced_metrics.md | 34 + book/src/api-bn.md | 130 ++ book/src/api-lighthouse.md | 179 ++ book/src/api-vc.md | 3 + book/src/api.md | 14 +- book/src/http.md | 26 +- book/src/http/advanced.md | 115 -- book/src/http/beacon.md | 784 -------- book/src/http/lighthouse.md | 182 -- book/src/http/network.md | 148 -- book/src/http/node.md | 91 - book/src/http/spec.md | 154 -- book/src/http/validator.md | 545 ----- .../consensus.md => validator-inclusion.md} | 135 +- book/src/websockets.md | 111 - common/eth2/Cargo.toml | 25 + common/eth2/src/lib.rs | 784 ++++++++ common/eth2/src/lighthouse.rs | 224 +++ common/eth2/src/types.rs | 432 ++++ common/lighthouse_metrics/src/lib.rs | 14 + 
common/remote_beacon_node/Cargo.toml | 21 - common/remote_beacon_node/src/lib.rs | 732 ------- common/rest_types/Cargo.toml | 27 - common/rest_types/src/api_error.rs | 99 - common/rest_types/src/beacon.rs | 65 - common/rest_types/src/consensus.rs | 66 - common/rest_types/src/handler.rs | 247 --- common/rest_types/src/lib.rs | 22 - common/rest_types/src/node.rs | 103 - common/rest_types/src/validator.rs | 103 - common/slot_clock/src/lib.rs | 10 + common/warp_utils/Cargo.toml | 15 + common/warp_utils/src/lib.rs | 5 + common/warp_utils/src/reject.rs | 168 ++ common/warp_utils/src/reply.rs | 15 + consensus/fork_choice/src/fork_choice.rs | 16 +- consensus/fork_choice/src/lib.rs | 1 + consensus/fork_choice/tests/tests.rs | 2 +- .../src/fork_choice_test_definition.rs | 13 +- consensus/proto_array/src/proto_array.rs | 6 +- .../src/proto_array_fork_choice.rs | 17 +- consensus/serde_hex/Cargo.toml | 9 - consensus/serde_utils/Cargo.toml | 1 + consensus/serde_utils/src/bytes_4_hex.rs | 38 + .../src/lib.rs => serde_utils/src/hex.rs} | 12 + consensus/serde_utils/src/lib.rs | 9 +- consensus/serde_utils/src/quoted_int.rs | 144 ++ consensus/serde_utils/src/quoted_u64.rs | 115 -- consensus/serde_utils/src/quoted_u64_vec.rs | 8 +- consensus/serde_utils/src/u32_hex.rs | 21 + consensus/serde_utils/src/u8_hex.rs | 29 + consensus/ssz_types/Cargo.toml | 2 +- consensus/ssz_types/src/bitfield.rs | 2 +- consensus/types/Cargo.toml | 2 + consensus/types/src/aggregate_and_proof.rs | 1 + consensus/types/src/attestation_data.rs | 1 + consensus/types/src/attestation_duty.rs | 3 + consensus/types/src/beacon_block.rs | 1 + consensus/types/src/beacon_block_body.rs | 5 - consensus/types/src/beacon_block_header.rs | 1 + consensus/types/src/beacon_state.rs | 9 + .../types/src/beacon_state/committee_cache.rs | 1 + consensus/types/src/chain_spec.rs | 138 +- consensus/types/src/deposit_data.rs | 1 + consensus/types/src/deposit_message.rs | 1 + consensus/types/src/enr_fork_id.rs | 11 +- 
consensus/types/src/eth1_data.rs | 1 + consensus/types/src/fork.rs | 11 +- consensus/types/src/fork_data.rs | 6 +- consensus/types/src/free_attestation.rs | 1 + consensus/types/src/graffiti.rs | 132 ++ consensus/types/src/indexed_attestation.rs | 38 + consensus/types/src/lib.rs | 8 +- consensus/types/src/pending_attestation.rs | 2 + consensus/types/src/shuffling_id.rs | 61 + consensus/types/src/slot_epoch_macros.rs | 13 + consensus/types/src/subnet_id.rs | 3 +- consensus/types/src/utils.rs | 3 - consensus/types/src/utils/serde_utils.rs | 134 -- consensus/types/src/validator_subscription.rs | 21 + consensus/types/src/voluntary_exit.rs | 1 + crypto/bls/Cargo.toml | 2 +- crypto/bls/src/generic_aggregate_signature.rs | 19 +- crypto/bls/src/generic_public_key.rs | 10 +- crypto/bls/src/generic_public_key_bytes.rs | 20 +- crypto/bls/src/generic_signature.rs | 10 +- crypto/bls/src/generic_signature_bytes.rs | 10 +- crypto/bls/src/macros.rs | 53 +- testing/node_test_rig/Cargo.toml | 2 +- testing/node_test_rig/src/lib.rs | 37 +- testing/simulator/src/checks.rs | 20 +- testing/simulator/src/cli.rs | 4 +- testing/simulator/src/local_network.rs | 15 +- testing/simulator/src/sync_sim.rs | 6 +- validator_client/Cargo.toml | 3 +- validator_client/src/attestation_service.rs | 330 ++- validator_client/src/block_service.rs | 47 +- validator_client/src/config.rs | 8 +- validator_client/src/duties_service.rs | 280 ++- validator_client/src/fork_service.rs | 22 +- .../src/initialized_validators.rs | 3 +- validator_client/src/is_synced.rs | 78 +- validator_client/src/lib.rs | 138 +- validator_client/src/validator_duty.rs | 131 ++ 156 files changed, 8861 insertions(+), 8915 deletions(-) rename beacon_node/{rest_api => http_api}/Cargo.toml (51%) create mode 100644 beacon_node/http_api/src/beacon_proposer_cache.rs create mode 100644 beacon_node/http_api/src/block_id.rs create mode 100644 beacon_node/http_api/src/lib.rs create mode 100644 beacon_node/http_api/src/metrics.rs create mode 100644 
beacon_node/http_api/src/state_id.rs create mode 100644 beacon_node/http_api/src/validator_inclusion.rs create mode 100644 beacon_node/http_api/tests/tests.rs create mode 100644 beacon_node/http_metrics/Cargo.toml create mode 100644 beacon_node/http_metrics/src/lib.rs rename beacon_node/{rest_api => http_metrics}/src/metrics.rs (69%) create mode 100644 beacon_node/http_metrics/tests/tests.rs delete mode 100644 beacon_node/rest_api/src/beacon.rs delete mode 100644 beacon_node/rest_api/src/config.rs delete mode 100644 beacon_node/rest_api/src/consensus.rs delete mode 100644 beacon_node/rest_api/src/helpers.rs delete mode 100644 beacon_node/rest_api/src/lib.rs delete mode 100644 beacon_node/rest_api/src/lighthouse.rs delete mode 100644 beacon_node/rest_api/src/node.rs delete mode 100644 beacon_node/rest_api/src/router.rs delete mode 100644 beacon_node/rest_api/src/url_query.rs delete mode 100644 beacon_node/rest_api/src/validator.rs delete mode 100644 beacon_node/rest_api/tests/test.rs create mode 100644 book/src/advanced_metrics.md create mode 100644 book/src/api-bn.md create mode 100644 book/src/api-lighthouse.md create mode 100644 book/src/api-vc.md delete mode 100644 book/src/http/advanced.md delete mode 100644 book/src/http/beacon.md delete mode 100644 book/src/http/lighthouse.md delete mode 100644 book/src/http/network.md delete mode 100644 book/src/http/node.md delete mode 100644 book/src/http/spec.md delete mode 100644 book/src/http/validator.md rename book/src/{http/consensus.md => validator-inclusion.md} (52%) delete mode 100644 book/src/websockets.md create mode 100644 common/eth2/Cargo.toml create mode 100644 common/eth2/src/lib.rs create mode 100644 common/eth2/src/lighthouse.rs create mode 100644 common/eth2/src/types.rs delete mode 100644 common/remote_beacon_node/Cargo.toml delete mode 100644 common/remote_beacon_node/src/lib.rs delete mode 100644 common/rest_types/Cargo.toml delete mode 100644 common/rest_types/src/api_error.rs delete mode 100644 
common/rest_types/src/beacon.rs delete mode 100644 common/rest_types/src/consensus.rs delete mode 100644 common/rest_types/src/handler.rs delete mode 100644 common/rest_types/src/lib.rs delete mode 100644 common/rest_types/src/node.rs delete mode 100644 common/rest_types/src/validator.rs create mode 100644 common/warp_utils/Cargo.toml create mode 100644 common/warp_utils/src/lib.rs create mode 100644 common/warp_utils/src/reject.rs create mode 100644 common/warp_utils/src/reply.rs delete mode 100644 consensus/serde_hex/Cargo.toml create mode 100644 consensus/serde_utils/src/bytes_4_hex.rs rename consensus/{serde_hex/src/lib.rs => serde_utils/src/hex.rs} (81%) create mode 100644 consensus/serde_utils/src/quoted_int.rs delete mode 100644 consensus/serde_utils/src/quoted_u64.rs create mode 100644 consensus/serde_utils/src/u32_hex.rs create mode 100644 consensus/serde_utils/src/u8_hex.rs create mode 100644 consensus/types/src/graffiti.rs create mode 100644 consensus/types/src/shuffling_id.rs delete mode 100644 consensus/types/src/utils.rs delete mode 100644 consensus/types/src/utils/serde_utils.rs create mode 100644 consensus/types/src/validator_subscription.rs create mode 100644 validator_client/src/validator_duty.rs diff --git a/Cargo.lock b/Cargo.lock index 73c7d707a7a..a94d97af3a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -227,12 +227,6 @@ dependencies = [ "syn", ] -[[package]] -name = "assert_matches" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" - [[package]] name = "async-tls" version = "0.8.0" @@ -349,7 +343,6 @@ dependencies = [ "rand 0.7.3", "rand_core 0.5.1", "rayon", - "regex", "safe_arith", "serde", "serde_derive", @@ -519,7 +512,7 @@ dependencies = [ "rand 0.7.3", "serde", "serde_derive", - "serde_hex", + "serde_utils", "tree_hash", "zeroize", ] @@ -575,6 +568,16 @@ dependencies = [ "serde", ] +[[package]] +name = "buf_redux" +version = 
"0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", +] + [[package]] name = "bumpalo" version = "3.4.0" @@ -770,13 +773,14 @@ dependencies = [ "eth2_ssz", "futures 0.3.5", "genesis", + "http_api", + "http_metrics", "lazy_static", "lighthouse_metrics", "network", "parking_lot 0.11.0", "prometheus", "reqwest", - "rest_api", "serde", "serde_derive", "serde_yaml", @@ -1460,6 +1464,22 @@ dependencies = [ "web3", ] +[[package]] +name = "eth2" +version = "0.1.0" +dependencies = [ + "eth2_libp2p", + "hex 0.4.2", + "procinfo", + "proto_array", + "psutil", + "reqwest", + "serde", + "serde_json", + "serde_utils", + "types", +] + [[package]] name = "eth2_config" version = "0.2.0" @@ -1600,7 +1620,7 @@ dependencies = [ "eth2_ssz", "serde", "serde_derive", - "serde_hex", + "serde_utils", "tree_hash", "tree_hash_derive", "typenum", @@ -2148,6 +2168,31 @@ dependencies = [ "tokio 0.2.22", ] +[[package]] +name = "headers" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f" +dependencies = [ + "base64 0.12.3", + "bitflags 1.2.1", + "bytes 0.5.6", + "headers-core", + "http 0.2.1", + "mime 0.3.16", + "sha-1 0.8.2", + "time 0.1.44", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http 0.2.1", +] + [[package]] name = "heck" version = "0.3.1" @@ -2269,6 +2314,58 @@ dependencies = [ "http 0.2.1", ] +[[package]] +name = "http_api" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "discv5", + "environment", + "eth1", + "eth2", + "eth2_libp2p", + "fork_choice", + "hex 0.4.2", + "lazy_static", + "lighthouse_metrics", + "lighthouse_version", + "network", + 
"parking_lot 0.11.0", + "serde", + "slog", + "slot_clock", + "state_processing", + "store", + "tokio 0.2.22", + "tree_hash", + "types", + "warp", + "warp_utils", +] + +[[package]] +name = "http_metrics" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "environment", + "eth2", + "eth2_libp2p", + "lazy_static", + "lighthouse_metrics", + "lighthouse_version", + "prometheus", + "reqwest", + "serde", + "slog", + "slot_clock", + "store", + "tokio 0.2.22", + "types", + "warp", + "warp_utils", +] + [[package]] name = "httparse" version = "1.3.4" @@ -2448,6 +2545,15 @@ dependencies = [ "hashbrown 0.9.1", ] +[[package]] +name = "input_buffer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" +dependencies = [ + "bytes 0.5.6", +] + [[package]] name = "instant" version = "0.1.7" @@ -3259,6 +3365,24 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" +[[package]] +name = "multipart" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8209c33c951f07387a8497841122fc6f712165e3f9bda3e6be4645b58188f676" +dependencies = [ + "buf_redux", + "httparse", + "log 0.4.11", + "mime 0.3.16", + "mime_guess", + "quick-error", + "rand 0.6.5", + "safemem", + "tempfile", + "twoway", +] + [[package]] name = "multistream-select" version = "0.8.2" @@ -3339,7 +3463,6 @@ dependencies = [ "num_cpus", "parking_lot 0.11.0", "rand 0.7.3", - "rest_types", "rlp", "slog", "sloggers", @@ -3372,10 +3495,10 @@ version = "0.2.0" dependencies = [ "beacon_node", "environment", + "eth2", "eth2_config", "futures 0.3.5", "genesis", - "remote_beacon_node", "reqwest", "serde", "tempdir", @@ -4054,6 +4177,25 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "rand" +version = "0.6.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.7", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc 0.1.0", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift 0.1.1", + "winapi 0.3.9", +] + [[package]] name = "rand" version = "0.7.3" @@ -4062,9 +4204,19 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom", "libc", - "rand_chacha", + "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand_chacha" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.3.1", ] [[package]] @@ -4101,6 +4253,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_hc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -4110,6 +4271,59 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_isaac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_jitter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +dependencies = [ + "libc", + "rand_core 0.4.2", + "winapi 0.3.9", +] + +[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +dependencies = [ + "cloudabi 0.0.3", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi 0.3.9", +] + +[[package]] +name = "rand_pcg" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.4.2", +] + +[[package]] +name = "rand_xorshift" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_xorshift" version = "0.2.0" @@ -4197,24 +4411,6 @@ version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" -[[package]] -name = "remote_beacon_node" -version = "0.2.0" -dependencies = [ - "eth2_config", - "eth2_ssz", - "futures 0.3.5", - "hex 0.4.2", - "operation_pool", - "proto_array", - "reqwest", - "rest_types", - "serde", - "serde_json", - "types", - "url 2.1.1", -] - [[package]] name = "remove_dir_all" version = "0.5.3" @@ -4260,73 +4456,6 @@ dependencies = [ "winreg", ] -[[package]] -name = "rest_api" -version = "0.2.0" -dependencies = [ - "assert_matches", - "beacon_chain", - "bls", - "bus", - "environment", - "eth2_config", - "eth2_libp2p", - "eth2_ssz", - "eth2_ssz_derive", - "futures 0.3.5", - "hex 0.4.2", - "http 0.2.1", - "hyper 0.13.8", - "itertools 0.9.0", - "lazy_static", - "lighthouse_metrics", - "lighthouse_version", - "network", - "node_test_rig", - "operation_pool", - "parking_lot 0.11.0", - "remote_beacon_node", - "rest_types", - "serde", - "serde_json", - "serde_yaml", - "slog", - "slog-async", - "slog-term", - "slot_clock", - "state_processing", - "store", - "tokio 0.2.22", - "tree_hash", - "types", - "uhttp_sse", - "url 2.1.1", -] - 
-[[package]] -name = "rest_types" -version = "0.2.0" -dependencies = [ - "beacon_chain", - "bls", - "environment", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "hyper 0.13.8", - "procinfo", - "psutil", - "rayon", - "serde", - "serde_json", - "serde_yaml", - "state_processing", - "store", - "tokio 0.2.22", - "tree_hash", - "types", -] - [[package]] name = "ring" version = "0.16.12" @@ -4615,14 +4744,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_hex" -version = "0.2.0" -dependencies = [ - "hex 0.4.2", - "serde", -] - [[package]] name = "serde_json" version = "1.0.57" @@ -4661,6 +4782,7 @@ dependencies = [ name = "serde_utils" version = "0.1.0" dependencies = [ + "hex 0.4.2", "serde", "serde_derive", "serde_json", @@ -5668,6 +5790,19 @@ dependencies = [ "tokio 0.2.22", ] +[[package]] +name = "tokio-tungstenite" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c" +dependencies = [ + "futures-util", + "log 0.4.11", + "pin-project", + "tokio 0.2.22", + "tungstenite", +] + [[package]] name = "tokio-udp" version = "0.1.6" @@ -5769,6 +5904,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-futures" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +dependencies = [ + "pin-project", + "tracing", +] + [[package]] name = "trackable" version = "1.0.0" @@ -5822,6 +5967,34 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "tungstenite" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23" +dependencies = [ + "base64 0.12.3", + "byteorder", + "bytes 0.5.6", + "http 0.2.1", + 
"httparse", + "input_buffer", + "log 0.4.11", + "rand 0.7.3", + "sha-1 0.9.1", + "url 2.1.1", + "utf-8", +] + +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + [[package]] name = "typeable" version = "0.1.2" @@ -5856,13 +6029,15 @@ dependencies = [ "log 0.4.11", "merkle_proof", "rand 0.7.3", - "rand_xorshift", + "rand_xorshift 0.2.0", "rayon", + "regex", "rusqlite", "safe_arith", "serde", "serde_derive", "serde_json", + "serde_utils", "serde_yaml", "slog", "swap_or_not_shuffle", @@ -5872,12 +6047,6 @@ dependencies = [ "tree_hash_derive", ] -[[package]] -name = "uhttp_sse" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ff93345ba2206230b1bb1aa3ece1a63dd9443b7531024575d16a0680a59444" - [[package]] name = "uint" version = "0.8.5" @@ -6018,6 +6187,18 @@ dependencies = [ "percent-encoding 2.1.0", ] +[[package]] +name = "urlencoding" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" + +[[package]] +name = "utf-8" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" + [[package]] name = "uuid" version = "0.8.1" @@ -6040,6 +6221,7 @@ dependencies = [ "directory", "dirs", "environment", + "eth2", "eth2_config", "eth2_interop_keypairs", "eth2_keystore", @@ -6052,8 +6234,6 @@ dependencies = [ "logging", "parking_lot 0.11.0", "rayon", - "remote_beacon_node", - "rest_types", "serde", "serde_derive", "serde_json", @@ -6148,6 +6328,46 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407" +dependencies = [ + "bytes 0.5.6", + "futures 0.3.5", + "headers", + "http 0.2.1", + "hyper 0.13.8", + "log 0.4.11", + "mime 0.3.16", + "mime_guess", + "multipart", + "pin-project", + "scoped-tls 1.0.0", + "serde", + "serde_json", + "serde_urlencoded", + "tokio 0.2.22", + "tokio-tungstenite", + "tower-service", + "tracing", + "tracing-futures", + "urlencoding", +] + +[[package]] +name = "warp_utils" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "eth2", + "safe_arith", + "state_processing", + "types", + "warp", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index 82922f5a5f6..b8b2fdde765 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,9 @@ members = [ "beacon_node/client", "beacon_node/eth1", "beacon_node/eth2_libp2p", + "beacon_node/http_api", + "beacon_node/http_metrics", "beacon_node/network", - "beacon_node/rest_api", "beacon_node/store", "beacon_node/timer", "beacon_node/websocket_server", @@ -21,6 +22,7 @@ members = [ "common/compare_fields_derive", "common/deposit_contract", "common/directory", + "common/eth2", "common/eth2_config", "common/eth2_interop_keypairs", "common/eth2_testnet_config", @@ -30,10 +32,9 @@ members = [ "common/lighthouse_version", "common/logging", "common/lru_cache", - "common/remote_beacon_node", - "common/rest_types", "common/slot_clock", "common/test_random_derive", + "common/warp_utils", "common/validator_dir", "consensus/cached_tree_hash", @@ -44,7 +45,6 @@ members = [ "consensus/ssz", "consensus/ssz_derive", "consensus/ssz_types", - "consensus/serde_hex", "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 05ae819c49c..04e22f4268f 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -58,4 +58,3 @@ environment = { path = 
"../../lighthouse/environment" } bus = "2.2.3" derivative = "2.1.1" itertools = "0.9.0" -regex = "1.3.9" diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 64803304441..32a08590207 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -28,8 +28,7 @@ use crate::{ beacon_chain::{ - ATTESTATION_CACHE_LOCK_TIMEOUT, HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, observed_attestations::ObserveOutcome, @@ -38,12 +37,10 @@ use crate::{ }; use bls::verify_signature_sets; use proto_array::Block as ProtoBlock; -use slog::debug; use slot_clock::SlotClock; use state_processing::{ common::get_indexed_attestation, per_block_processing::errors::AttestationValidationError, - per_slot_processing, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, @@ -53,7 +50,7 @@ use std::borrow::Cow; use tree_hash::TreeHash; use types::{ Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, - RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, + SelectionProof, SignedAggregateAndProof, Slot, SubnetId, }; /// Returned when an attestation was not successfully verified. 
It might not have been verified for @@ -267,6 +264,7 @@ pub struct VerifiedAggregatedAttestation<T: BeaconChainTypes> { pub struct VerifiedUnaggregatedAttestation<T: BeaconChainTypes> { attestation: Attestation<T::EthSpec>, indexed_attestation: IndexedAttestation<T::EthSpec>, + subnet_id: SubnetId, } /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive @@ -276,6 +274,7 @@ impl<T: BeaconChainTypes> Clone for VerifiedUnaggregatedAttestation<T> { Self { attestation: self.attestation.clone(), indexed_attestation: self.indexed_attestation.clone(), + subnet_id: self.subnet_id, } } } @@ -428,6 +427,11 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> { pub fn attestation(&self) -> &Attestation<T::EthSpec> { &self.signed_aggregate.message.aggregate } + + /// Returns the underlying `signed_aggregate`. + pub fn aggregate(&self) -> &SignedAggregateAndProof<T::EthSpec> { + &self.signed_aggregate + } } impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { @@ -438,7 +442,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { /// verify that it was received on the correct subnet. pub fn verify( attestation: Attestation<T::EthSpec>, - subnet_id: SubnetId, + subnet_id: Option<SubnetId>, chain: &BeaconChain<T>, ) -> Result<Self, Error> { let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); @@ -513,13 +517,15 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { ) .map_err(BeaconChainError::from)?; - // Ensure the attestation is from the correct subnet. - if subnet_id != expected_subnet_id { - return Err(Error::InvalidSubnetId { - received: subnet_id, - expected: expected_subnet_id, - }); - } + // If a subnet was specified, ensure that subnet is correct. 
+ if let Some(subnet_id) = subnet_id { + if subnet_id != expected_subnet_id { + return Err(Error::InvalidSubnetId { + received: subnet_id, + expected: expected_subnet_id, + }); + } + }; let validator_index = *indexed_attestation .attesting_indices @@ -564,6 +570,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { Ok(Self { attestation, indexed_attestation, + subnet_id: expected_subnet_id, }) } @@ -572,6 +579,11 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { chain.add_to_naive_aggregation_pool(self) } + /// Returns the correct subnet for the attestation. + pub fn subnet_id(&self) -> SubnetId { + self.subnet_id + } + /// Returns the wrapped `attestation`. pub fn attestation(&self) -> &Attestation<T::EthSpec> { &self.attestation @@ -587,6 +599,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { } /// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain. +/// You can use this `shuffling_id` to read from the shuffling cache. /// /// The block root may not be known for two reasons: /// @@ -615,6 +628,7 @@ fn verify_head_block_is_known<T: BeaconChainTypes>( }); } } + Ok(block) } else { Err(Error::UnknownHeadBlock { @@ -770,7 +784,7 @@ type CommitteesPerSlot = u64; /// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the /// public keys cached in the `chain`. -pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>( +fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>( chain: &BeaconChain<T>, attestation: &Attestation<T::EthSpec>, ) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> { @@ -790,8 +804,8 @@ pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>( /// /// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state /// from disk and then update the `shuffling_cache`. 
-pub fn map_attestation_committee<'a, T, F, R>( - chain: &'a BeaconChain<T>, +fn map_attestation_committee<T, F, R>( + chain: &BeaconChain<T>, attestation: &Attestation<T::EthSpec>, map_fn: F, ) -> Result<R, Error> @@ -809,104 +823,23 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - let target_block = chain - .fork_choice - .read() - .get_block(&target.root) - .ok_or_else(|| Error::UnknownTargetRoot(target.root))?; - - // Obtain the shuffling cache, timing how long we wait. - let cache_wait_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); - - let mut shuffling_cache = chain - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?; - - metrics::stop_timer(cache_wait_timer); - - if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) { - let committees_per_slot = committee_cache.committees_per_slot(); - committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, - }) - }) - } else { - // Drop the shuffling cache to avoid holding the lock for any longer than - // required. 
- drop(shuffling_cache); - - debug!( - chain.log, - "Attestation processing cache miss"; - "attn_epoch" => attestation_epoch.as_u64(), - "target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(), - ); - - let state_read_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); - - let mut state = chain - .store - .get_inconsistent_state_for_attestation_verification_only( - &target_block.state_root, - Some(target_block.slot), - ) - .map_err(BeaconChainError::from)? - .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?; - - metrics::stop_timer(state_read_timer); - let state_skip_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); - - while state.current_epoch() + 1 < attestation_epoch { - // Here we tell `per_slot_processing` to skip hashing the state and just - // use the zero hash instead. - // - // The state roots are not useful for the shuffling, so there's no need to - // compute them. - per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) - .map_err(BeaconChainError::from)?; - } - - metrics::stop_timer(state_skip_timer); - let committee_building_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), attestation_epoch) - .map_err(BeaconChainError::IncorrectStateForAttestation)?; - - state - .build_committee_cache(relative_epoch, &chain.spec) - .map_err(BeaconChainError::from)?; - - let committee_cache = state - .committee_cache(relative_epoch) - .map_err(BeaconChainError::from)?; - - chain - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)? 
- .insert(attestation_epoch, target.root, committee_cache); - - metrics::stop_timer(committee_building_timer); - - let committees_per_slot = committee_cache.committees_per_slot(); - committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, - }) - }) + if !chain.fork_choice.read().contains_block(&target.root) { + return Err(Error::UnknownTargetRoot(target.root)); } + + chain + .with_committee_cache(target.root, attestation_epoch, |committee_cache| { + let committees_per_slot = committee_cache.committees_per_slot(); + + Ok(committee_cache + .get_beacon_committee(attestation.data.slot, attestation.data.index) + .map(|committee| map_fn((committee, committees_per_slot))) + .unwrap_or_else(|| { + Err(Error::NoCommitteeForSlotAndIndex { + slot: attestation.data.slot, + index: attestation.data.index, + }) + })) + }) + .map_err(BeaconChainError::from)? 
} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1caaec5fea4..3bf5ae282d4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -21,7 +21,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_fork_choice::PersistedForkChoice; -use crate::shuffling_cache::ShufflingCache; +use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -31,7 +31,6 @@ use fork_choice::ForkChoice; use itertools::process_results; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use regex::bytes::Regex; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ @@ -201,6 +200,8 @@ pub struct BeaconChain<T: BeaconChainTypes> { pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>, /// The root of the genesis block. pub genesis_block_root: Hash256, + /// The root of the genesis state. + pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, @@ -459,6 +460,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } } + /// Returns the block at the given slot, if any. Only returns blocks in the canonical chain. + /// + /// ## Errors + /// + /// May return a database error. + pub fn state_root_at_slot(&self, slot: Slot) -> Result<Option<Hash256>, Error> { + process_results(self.rev_iter_state_roots()?, |mut iter| { + iter.find(|(_, this_slot)| *this_slot == slot) + .map(|(root, _)| root) + }) + } + + /// Returns the block root at the given slot, if any. 
Only returns roots in the canonical chain. + /// + /// ## Errors + /// + /// May return a database error. + pub fn block_root_at_slot(&self, slot: Slot) -> Result<Option<Hash256>, Error> { + process_results(self.rev_iter_block_roots()?, |mut iter| { + iter.find(|(_, this_slot)| *this_slot == slot) + .map(|(root, _)| root) + }) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -506,6 +531,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> { f(&head_lock) } + /// Returns the beacon block root at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block_root(&self) -> Result<Hash256, Error> { + self.with_head(|s| Ok(s.beacon_block_root)) + } + + /// Returns the beacon block at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block(&self) -> Result<SignedBeaconBlock<T::EthSpec>, Error> { + self.with_head(|s| Ok(s.beacon_block.clone())) + } + + /// Returns the beacon state at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_state(&self) -> Result<BeaconState<T::EthSpec>, Error> { + self.with_head(|s| { + Ok(s.beacon_state + .clone_with(CloneConfig::committee_caches_only())) + }) + } + /// Returns info representing the head block and state. /// /// A summarized version of `Self::head` that involves less cloning. @@ -719,46 +768,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .map_err(Into::into) } - /// Returns the attestation slot and committee index for a given validator index. + /// Returns the attestation duties for a given validator index. /// /// Information is read from the current state, so only information from the present and prior /// epoch is available. 
- pub fn validator_attestation_slot_and_index( + pub fn validator_attestation_duty( &self, validator_index: usize, epoch: Epoch, - ) -> Result<Option<(Slot, u64)>, Error> { - let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); - let head_state = &self.head()?.beacon_state; - - let mut state = if epoch == as_epoch(head_state.slot) { - self.head()?.beacon_state - } else { - // The block proposer shuffling is not affected by the state roots, so we don't need to - // calculate them. - self.state_at_slot( - epoch.start_slot(T::EthSpec::slots_per_epoch()), - StateSkipConfig::WithoutStateRoots, - )? - }; - - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + ) -> Result<Option<AttestationDuty>, Error> { + let head_block_root = self.head_beacon_block_root()?; - if as_epoch(state.slot) != epoch { - return Err(Error::InvariantViolated(format!( - "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", - as_epoch(state.slot), - epoch - ))); - } - - if let Some(attestation_duty) = - state.get_attestation_duties(validator_index, RelativeEpoch::Current)? - { - Ok(Some((attestation_duty.slot, attestation_duty.index))) - } else { - Ok(None) - } + self.with_committee_cache(head_block_root, epoch, |committee_cache| { + Ok(committee_cache.get_attestation_duties(validator_index)) + }) } /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. @@ -767,11 +790,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn get_aggregated_attestation( &self, data: &AttestationData, - ) -> Result<Option<Attestation<T::EthSpec>>, Error> { + ) -> Option<Attestation<T::EthSpec>> { + self.naive_aggregation_pool.read().get(data) + } + + /// Returns an aggregated `Attestation`, if any, that has a matching + /// `attestation.data.tree_hash_root()`. + /// + /// The attestation will be obtained from `self.naive_aggregation_pool`. 
+ pub fn get_aggregated_attestation_by_slot_and_root( + &self, + slot: Slot, + attestation_data_root: &Hash256, + ) -> Option<Attestation<T::EthSpec>> { self.naive_aggregation_pool .read() - .get(data) - .map_err(Into::into) + .get_by_slot_and_root(slot, attestation_data_root) } /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. @@ -898,7 +932,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn verify_unaggregated_attestation_for_gossip( &self, attestation: Attestation<T::EthSpec>, - subnet_id: SubnetId, + subnet_id: Option<SubnetId>, ) -> Result<VerifiedUnaggregatedAttestation<T>, AttestationError> { metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS); let _timer = @@ -1320,11 +1354,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { block: SignedBeaconBlock<T::EthSpec>, ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> { let slot = block.message.slot; - #[allow(clippy::invalid_regex)] - let re = Regex::new("\\p{C}").expect("regex is valid"); - let graffiti_string = - String::from_utf8_lossy(&re.replace_all(&block.message.body.graffiti[..], &b""[..])) - .to_string(); + let graffiti_string = block.message.body.graffiti.as_utf8_lossy(); match GossipVerifiedBlock::new(block, self) { Ok(verified) => { @@ -1449,8 +1479,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ) -> Result<Hash256, BlockError<T::EthSpec>> { let signed_block = fully_verified_block.block; let block_root = fully_verified_block.block_root; - let state = fully_verified_block.state; - let parent_block = fully_verified_block.parent_block; + let mut state = fully_verified_block.state; let current_slot = self.slot()?; let mut ops = fully_verified_block.intermediate_states; @@ -1482,29 +1511,25 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)? 
.import_new_pubkeys(&state)?; - // If the imported block is in the previous or current epochs (according to the - // wall-clock), check to see if this is the first block of the epoch. If so, add the - // committee to the shuffling cache. - if state.current_epoch() + 1 >= self.epoch()? - && parent_block.slot().epoch(T::EthSpec::slots_per_epoch()) != state.current_epoch() - { - let mut shuffling_cache = self - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)?; - - let committee_cache = state.committee_cache(RelativeEpoch::Current)?; + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. + for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] { + let shuffling_id = ShufflingId::new(block_root, &state, *relative_epoch)?; - let epoch_start_slot = state - .current_epoch() - .start_slot(T::EthSpec::slots_per_epoch()); - let target_root = if state.slot == epoch_start_slot { - block_root - } else { - *state.get_block_root(epoch_start_slot)? - }; - - shuffling_cache.insert(state.current_epoch(), target_root, committee_cache); + let shuffling_is_cached = self + .shuffling_cache + .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .contains(&shuffling_id); + + if !shuffling_is_cached { + state.build_committee_cache(*relative_epoch, &self.spec)?; + let committee_cache = state.committee_cache(*relative_epoch)?; + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .insert(shuffling_id, committee_cache); + } } let mut fork_choice = self.fork_choice.write(); @@ -1992,6 +2017,129 @@ impl<T: BeaconChainTypes> BeaconChain<T> { Ok(()) } + /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head + /// `head_block_root`. 
+ /// + /// It's not necessary that `head_block_root` matches our current view of the chain, it can be + /// any block that is: + /// + /// - Known to us. + /// - The finalized block or a descendant of the finalized block. + /// + /// It would be quite common for attestation verification operations to use a `head_block_root` + /// that differs from our view of the head. + /// + /// ## Important + /// + /// This function is **not** suitable for determining proposer duties. + /// + /// ## Notes + /// + /// This function exists in this odd "map" pattern because efficiently obtaining a committee + /// can be complex. It might involve reading straight from the `beacon_chain.shuffling_cache` + /// or it might involve reading it from a state from the DB. Due to the complexities of + /// `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here. + /// + /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the + /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`. + pub(crate) fn with_committee_cache<F, R>( + &self, + head_block_root: Hash256, + shuffling_epoch: Epoch, + map_fn: F, + ) -> Result<R, Error> + where + F: Fn(&CommitteeCache) -> Result<R, Error>, + { + let head_block = self + .fork_choice + .read() + .get_block(&head_block_root) + .ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?; + + let shuffling_id = BlockShufflingIds { + current: head_block.current_epoch_shuffling_id.clone(), + next: head_block.next_epoch_shuffling_id.clone(), + block_root: head_block.root, + } + .id_for_epoch(shuffling_epoch) + .ok_or_else(|| Error::InvalidShufflingId { + shuffling_epoch, + head_block_epoch: head_block.slot.epoch(T::EthSpec::slots_per_epoch()), + })?; + + // Obtain the shuffling cache, timing how long we wait. 
+ let cache_wait_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); + + let mut shuffling_cache = self + .shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)?; + + metrics::stop_timer(cache_wait_timer); + + if let Some(committee_cache) = shuffling_cache.get(&shuffling_id) { + map_fn(committee_cache) + } else { + // Drop the shuffling cache to avoid holding the lock for any longer than + // required. + drop(shuffling_cache); + + debug!( + self.log, + "Committee cache miss"; + "shuffling_epoch" => shuffling_epoch.as_u64(), + "head_block_root" => head_block_root.to_string(), + ); + + let state_read_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); + + let mut state = self + .store + .get_inconsistent_state_for_attestation_verification_only( + &head_block.state_root, + Some(head_block.slot), + )? + .ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?; + + metrics::stop_timer(state_read_timer); + let state_skip_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); + + while state.current_epoch() + 1 < shuffling_epoch { + // Here we tell `per_slot_processing` to skip hashing the state and just + // use the zero hash instead. + // + // The state roots are not useful for the shuffling, so there's no need to + // compute them. 
+ per_slot_processing(&mut state, Some(Hash256::zero()), &self.spec) + .map_err(Error::from)?; + } + + metrics::stop_timer(state_skip_timer); + let committee_building_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); + + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), shuffling_epoch) + .map_err(Error::IncorrectStateForAttestation)?; + + state.build_committee_cache(relative_epoch, &self.spec)?; + + let committee_cache = state.committee_cache(relative_epoch)?; + + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .insert(shuffling_id, committee_cache); + + metrics::stop_timer(committee_building_timer); + + map_fn(&committee_cache) + } + } + /// Returns `true` if the given block root has not been processed. pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> { Ok(!self diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ac9d1c4b1fd..ff47c7a2b81 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -374,8 +374,13 @@ where let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); - let fork_choice = ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message) - .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?; + let fork_choice = ForkChoice::from_genesis( + fc_store, + genesis.beacon_block_root, + &genesis.beacon_block.message, + &genesis.beacon_state, + ) + .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?; self.fork_choice = Some(fork_choice); self.genesis_time = Some(genesis.beacon_state.genesis_time); @@ -561,6 +566,7 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, genesis_validators_root: canonical_head.beacon_state.genesis_validators_root, + genesis_state_root: canonical_head.beacon_state_root, 
canonical_head: TimeoutRwLock::new(canonical_head.clone()), genesis_block_root, fork_choice: RwLock::new(fork_choice), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 96f1c9a8411..6eb7bceeb21 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -83,6 +83,10 @@ pub enum BeaconChainError { ObservedBlockProducersError(ObservedBlockProducersError), PruningError(PruningError), ArithError(ArithError), + InvalidShufflingId { + shuffling_epoch: Epoch, + head_block_epoch: Epoch, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index c561141a1de..247f613a96d 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -1,7 +1,9 @@ use crate::metrics; use std::collections::HashMap; -use types::{Attestation, AttestationData, EthSpec, Slot}; +use tree_hash::TreeHash; +use types::{Attestation, AttestationData, EthSpec, Hash256, Slot}; +type AttestationDataRoot = Hash256; /// The number of slots that will be stored in the pool. /// /// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations @@ -53,7 +55,7 @@ pub enum Error { /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all /// `attestation` are from the same slot. 
struct AggregatedAttestationMap<E: EthSpec> { - map: HashMap<AttestationData, Attestation<E>>, + map: HashMap<AttestationDataRoot, Attestation<E>>, } impl<E: EthSpec> AggregatedAttestationMap<E> { @@ -87,7 +89,9 @@ impl<E: EthSpec> AggregatedAttestationMap<E> { return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); } - if let Some(existing_attestation) = self.map.get_mut(&a.data) { + let attestation_data_root = a.data.tree_hash_root(); + + if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) { if existing_attestation .aggregation_bits .get(committee_index) @@ -107,7 +111,7 @@ impl<E: EthSpec> AggregatedAttestationMap<E> { )); } - self.map.insert(a.data.clone(), a.clone()); + self.map.insert(attestation_data_root, a.clone()); Ok(InsertOutcome::NewAttestationData { committee_index }) } } @@ -115,8 +119,13 @@ impl<E: EthSpec> AggregatedAttestationMap<E> { /// Returns an aggregated `Attestation` with the given `data`, if any. /// /// The given `a.data.slot` must match the slot that `self` was initialized with. - pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> { - Ok(self.map.get(data).cloned()) + pub fn get(&self, data: &AttestationData) -> Option<Attestation<E>> { + self.map.get(&data.tree_hash_root()).cloned() + } + + /// Returns an aggregated `Attestation` with the given `root`, if any. + pub fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation<E>> { + self.map.get(root) } /// Iterate all attestations in `self`. @@ -220,12 +229,19 @@ impl<E: EthSpec> NaiveAggregationPool<E> { } /// Returns an aggregated `Attestation` with the given `data`, if any. - pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> { + pub fn get(&self, data: &AttestationData) -> Option<Attestation<E>> { + self.maps.get(&data.slot).and_then(|map| map.get(data)) + } + + /// Returns an aggregated `Attestation` with the given `data`, if any. 
+ pub fn get_by_slot_and_root( + &self, + slot: Slot, + root: &AttestationDataRoot, + ) -> Option<Attestation<E>> { self.maps - .iter() - .find(|(slot, _map)| **slot == data.slot) - .map(|(_slot, map)| map.get(data)) - .unwrap_or_else(|| Ok(None)) + .get(&slot) + .and_then(|map| map.get_by_root(root).cloned()) } /// Iterate all attestations in all slots of `self`. @@ -338,8 +354,7 @@ mod tests { let retrieved = pool .get(&a.data) - .expect("should not error while getting attestation") - .expect("should get an attestation"); + .expect("should not error while getting attestation"); assert_eq!( retrieved, a, "retrieved attestation should equal the one inserted" @@ -378,8 +393,7 @@ mod tests { let retrieved = pool .get(&a_0.data) - .expect("should not error while getting attestation") - .expect("should get an attestation"); + .expect("should not error while getting attestation"); let mut a_01 = a_0.clone(); a_01.aggregate(&a_1); @@ -408,8 +422,7 @@ mod tests { assert_eq!( pool.get(&a_0.data) - .expect("should not error while getting attestation") - .expect("should get an attestation"), + .expect("should not error while getting attestation"), retrieved, "should not have aggregated different attestation data" ); diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index d8b6e8706e1..b76adf05e64 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,6 +1,6 @@ use crate::metrics; use lru::LruCache; -use types::{beacon_state::CommitteeCache, Epoch, Hash256}; +use types::{beacon_state::CommitteeCache, Epoch, Hash256, ShufflingId}; /// The size of the LRU cache that stores committee caches for quicker verification. /// @@ -14,7 +14,7 @@ const CACHE_SIZE: usize = 16; /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like /// a find/replace error. 
pub struct ShufflingCache { - cache: LruCache<(Epoch, Hash256), CommitteeCache>, + cache: LruCache<ShufflingId, CommitteeCache>, } impl ShufflingCache { @@ -24,8 +24,8 @@ impl ShufflingCache { } } - pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> { - let opt = self.cache.get(&(epoch, root)); + pub fn get(&mut self, key: &ShufflingId) -> Option<&CommitteeCache> { + let opt = self.cache.get(key); if opt.is_some() { metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); @@ -36,11 +36,37 @@ impl ShufflingCache { opt } - pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) { - let key = (epoch, root); + pub fn contains(&self, key: &ShufflingId) -> bool { + self.cache.contains(key) + } + pub fn insert(&mut self, key: ShufflingId, committee_cache: &CommitteeCache) { if !self.cache.contains(&key) { self.cache.put(key, committee_cache.clone()); } } } + +/// Contains the shuffling IDs for a beacon block. +pub struct BlockShufflingIds { + pub current: ShufflingId, + pub next: ShufflingId, + pub block_root: Hash256, +} + +impl BlockShufflingIds { + /// Returns the shuffling ID for the given epoch. + /// + /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`. 
+ pub fn id_for_epoch(&self, epoch: Epoch) -> Option<ShufflingId> { + if epoch == self.current.shuffling_epoch { + Some(self.current.clone()) + } else if epoch == self.next.shuffling_epoch { + Some(self.next.clone()) + } else if epoch > self.next.shuffling_epoch { + Some(ShufflingId::from_components(epoch, self.block_root)) + } else { + None + } + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8690c2e8d2d..2bad5f892e5 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -26,9 +26,11 @@ use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, Mem use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, Epoch, - EthSpec, Hash256, Keypair, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockHash, SignedRoot, Slot, SubnetId, + AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState, + BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Hash256, IndexedAttestation, + Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList, + VoluntaryExit, }; pub use types::test_utils::generate_deterministic_keypairs; @@ -129,7 +131,7 @@ impl<E: EthSpec> BeaconChainHarness<BlockingMigratorEphemeralHarnessType<E>> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let config = StoreConfig::default(); @@ -193,7 +195,7 @@ impl<E: EthSpec> 
BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap(); @@ -238,7 +240,7 @@ impl<E: EthSpec> BeaconChainHarness<BlockingMigratorDiskHarnessType<E>> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let chain = BeaconChainBuilder::new(eth_spec_instance) @@ -397,7 +399,7 @@ where // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce // different blocks each time. 
- self.chain.set_graffiti(self.rng.gen::<[u8; 32]>()); + self.chain.set_graffiti(self.rng.gen::<[u8; 32]>().into()); let randao_reveal = { let epoch = slot.epoch(E::slots_per_epoch()); @@ -442,8 +444,8 @@ where let committee_count = state.get_committee_count_at_slot(state.slot).unwrap(); state - .get_beacon_committees_at_slot(state.slot) - .unwrap() + .get_beacon_committees_at_slot(attestation_slot) + .expect("should get committees") .iter() .map(|bc| { bc.committee @@ -570,7 +572,6 @@ where let aggregate = self .chain .get_aggregated_attestation(&attestation.data) - .unwrap() .unwrap_or_else(|| { committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| { agg.aggregate(att); @@ -601,6 +602,94 @@ where .collect() } + pub fn make_attester_slashing(&self, validator_indices: Vec<u64>) -> AttesterSlashing<E> { + let mut attestation_1 = IndexedAttestation { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + source: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + }, + signature: AggregateSignature::infinity(), + }; + + let mut attestation_2 = attestation_1.clone(); + attestation_2.data.index += 1; + + for attestation in &mut [&mut attestation_1, &mut attestation_2] { + for &i in &attestation.attesting_indices { + let sk = &self.validators_keypairs[i as usize].sk; + + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } + + AttesterSlashing { + attestation_1, + attestation_2, + } + } + + pub fn 
make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { + let mut block_header_1 = self + .chain + .head_beacon_block() + .unwrap() + .message + .block_header(); + block_header_1.proposer_index = validator_index; + + let mut block_header_2 = block_header_1.clone(); + block_header_2.state_root = Hash256::zero(); + + let sk = &self.validators_keypairs[validator_index as usize].sk; + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let mut signed_block_headers = vec![block_header_1, block_header_2] + .into_iter() + .map(|block_header| { + block_header.sign::<E>(&sk, &fork, genesis_validators_root, &self.chain.spec) + }) + .collect::<Vec<_>>(); + + ProposerSlashing { + signed_header_2: signed_block_headers.remove(1), + signed_header_1: signed_block_headers.remove(0), + } + } + + pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { + let sk = &self.validators_keypairs[validator_index as usize].sk; + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + VoluntaryExit { + epoch, + validator_index, + } + .sign(sk, &fork, genesis_validators_root, &self.chain.spec) + } + pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock<E>) -> SignedBeaconBlockHash { assert_eq!(self.chain.slot().unwrap(), slot); let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into(); @@ -612,7 +701,10 @@ where for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() { for (attestation, subnet_id) in unaggregated_attestations { self.chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) + .verify_unaggregated_attestation_for_gossip( + attestation.clone(), + Some(subnet_id), + ) .unwrap() .add_to_pool(&self.chain) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs 
b/beacon_node/beacon_chain/tests/attestation_verification.rs index 937850751c3..35c87c0d97f 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -570,7 +570,7 @@ fn unaggregated_gossip_verification() { matches!( harness .chain - .verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter) + .verify_unaggregated_attestation_for_gossip($attn_getter, Some($subnet_getter)) .err() .expect(&format!( "{} should error during verify_unaggregated_attestation_for_gossip", @@ -837,7 +837,7 @@ fn unaggregated_gossip_verification() { harness .chain - .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id) + .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), Some(subnet_id)) .expect("valid attestation should be verified"); /* @@ -926,6 +926,6 @@ fn attestation_that_skips_epochs() { harness .chain - .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) .expect("should gossip verify attestation that skips slots"); } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index caa2f9d6cfc..e9006a6268d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -326,7 +326,7 @@ fn epoch_boundary_state_attestation_processing() { let res = harness .chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); + .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); let expected_attestation_slot = attestation.data.slot; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 12f1c4364a4..721eb409167 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ 
b/beacon_node/beacon_chain/tests/tests.rs @@ -463,7 +463,7 @@ fn attestations_with_increasing_slots() { for (attestation, subnet_id) in attestations.into_iter().flatten() { let res = harness .chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); + .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); let expected_attestation_slot = attestation.data.slot; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index ba98eb946d4..797d7adb430 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -14,7 +14,6 @@ store = { path = "../store" } network = { path = "../network" } timer = { path = "../timer" } eth2_libp2p = { path = "../eth2_libp2p" } -rest_api = { path = "../rest_api" } parking_lot = "0.11.0" websocket_server = { path = "../websocket_server" } prometheus = "0.9.0" @@ -42,3 +41,5 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } time = "0.2.16" bus = "2.2.3" directory = {path = "../../common/directory"} +http_api = { path = "../http_api" } +http_metrics = { path = "../http_metrics" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 15cd97ea8e4..05cc6aa6d7b 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -13,15 +13,14 @@ use beacon_chain::{ use bus::Bus; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; -use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; use genesis::{interop_genesis_state, Eth1GenesisService}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use parking_lot::Mutex; -use slog::info; +use slog::{debug, info}; use ssz::Decode; use std::net::SocketAddr; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; @@ -61,7 +60,10 @@ pub struct 
ClientBuilder<T: BeaconChainTypes> { event_handler: Option<T::EventHandler>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, - http_listen_addr: Option<SocketAddr>, + db_path: Option<PathBuf>, + freezer_db_path: Option<PathBuf>, + http_api_config: http_api::Config, + http_metrics_config: http_metrics::Config, websocket_listen_addr: Option<SocketAddr>, eth_spec_instance: T::EthSpec, } @@ -103,7 +105,10 @@ where event_handler: None, network_globals: None, network_send: None, - http_listen_addr: None, + db_path: None, + freezer_db_path: None, + http_api_config: <_>::default(), + http_metrics_config: <_>::default(), websocket_listen_addr: None, eth_spec_instance, } @@ -280,55 +285,16 @@ where Ok(self) } - /// Immediately starts the beacon node REST API http server. - pub fn http_server( - mut self, - client_config: &ClientConfig, - eth2_config: &Eth2Config, - events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>, - ) -> Result<Self, String> { - let beacon_chain = self - .beacon_chain - .clone() - .ok_or_else(|| "http_server requires a beacon chain")?; - let context = self - .runtime_context - .as_ref() - .ok_or_else(|| "http_server requires a runtime_context")? 
- .service_context("http".into()); - let network_globals = self - .network_globals - .clone() - .ok_or_else(|| "http_server requires a libp2p network")?; - let network_send = self - .network_send - .clone() - .ok_or_else(|| "http_server requires a libp2p network sender")?; - - let network_info = rest_api::NetworkInfo { - network_globals, - network_chan: network_send, - }; - - let listening_addr = rest_api::start_server( - context.executor, - &client_config.rest_api, - beacon_chain, - network_info, - client_config - .create_db_path() - .map_err(|_| "unable to read data dir")?, - client_config - .create_freezer_db_path() - .map_err(|_| "unable to read freezer DB dir")?, - eth2_config.clone(), - events, - ) - .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?; - - self.http_listen_addr = Some(listening_addr); + /// Provides configuration for the HTTP API. + pub fn http_api_config(mut self, config: http_api::Config) -> Self { + self.http_api_config = config; + self + } - Ok(self) + /// Provides configuration for the HTTP server that serves Prometheus metrics. + pub fn http_metrics_config(mut self, config: http_metrics::Config) -> Self { + self.http_metrics_config = config; + self } /// Immediately starts the service that periodically logs information each slot. @@ -367,25 +333,85 @@ where /// specified. /// /// If type inference errors are being raised, see the comment on the definition of `Self`. 
+ #[allow(clippy::type_complexity)] pub fn build( self, - ) -> Client< - Witness< - TStoreMigrator, - TSlotClock, - TEth1Backend, - TEthSpec, - TEventHandler, - THotStore, - TColdStore, + ) -> Result< + Client< + Witness< + TStoreMigrator, + TSlotClock, + TEth1Backend, + TEthSpec, + TEventHandler, + THotStore, + TColdStore, + >, >, + String, > { - Client { + let runtime_context = self + .runtime_context + .as_ref() + .ok_or_else(|| "build requires a runtime context".to_string())?; + let log = runtime_context.log().clone(); + + let http_api_listen_addr = if self.http_api_config.enabled { + let ctx = Arc::new(http_api::Context { + config: self.http_api_config.clone(), + chain: self.beacon_chain.clone(), + network_tx: self.network_send.clone(), + network_globals: self.network_globals.clone(), + log: log.clone(), + }); + + let exit = runtime_context.executor.exit(); + + let (listen_addr, server) = http_api::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + + runtime_context + .clone() + .executor + .spawn_without_exit(async move { server.await }, "http-api"); + + Some(listen_addr) + } else { + info!(log, "HTTP server is disabled"); + None + }; + + let http_metrics_listen_addr = if self.http_metrics_config.enabled { + let ctx = Arc::new(http_metrics::Context { + config: self.http_metrics_config.clone(), + chain: self.beacon_chain.clone(), + db_path: self.db_path.clone(), + freezer_db_path: self.freezer_db_path.clone(), + log: log.clone(), + }); + + let exit = runtime_context.executor.exit(); + + let (listen_addr, server) = http_metrics::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + + runtime_context + .executor + .spawn_without_exit(async move { server.await }, "http-api"); + + Some(listen_addr) + } else { + debug!(log, "Metrics server is disabled"); + None + }; + + Ok(Client { beacon_chain: self.beacon_chain, network_globals: self.network_globals, - http_listen_addr: 
self.http_listen_addr, + http_api_listen_addr, + http_metrics_listen_addr, websocket_listen_addr: self.websocket_listen_addr, - } + }) } } @@ -520,6 +546,9 @@ where .clone() .ok_or_else(|| "disk_store requires a chain spec".to_string())?; + self.db_path = Some(hot_path.into()); + self.freezer_db_path = Some(cold_path.into()); + let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone()) .map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(Arc::new(store)); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index fdcd3d6e819..0cf90d6b45d 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -62,10 +62,11 @@ pub struct Config { pub genesis: ClientGenesis, pub store: store::StoreConfig, pub network: network::NetworkConfig, - pub rest_api: rest_api::Config, pub chain: beacon_chain::ChainConfig, pub websocket_server: websocket_server::Config, pub eth1: eth1::Config, + pub http_api: http_api::Config, + pub http_metrics: http_metrics::Config, } impl Default for Config { @@ -79,7 +80,6 @@ impl Default for Config { store: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), - rest_api: <_>::default(), websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), dummy_eth1_backend: false, @@ -87,6 +87,8 @@ impl Default for Config { eth1: <_>::default(), disabled_forks: Vec::new(), graffiti: Graffiti::default(), + http_api: <_>::default(), + http_metrics: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index da670ff1344..6b721aee924 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,10 @@ pub use eth2_config::Eth2Config; pub struct Client<T: BeaconChainTypes> { beacon_chain: Option<Arc<BeaconChain<T>>>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, - http_listen_addr: Option<SocketAddr>, + /// Listen address for the 
standard eth2.0 API, if the service was started. + http_api_listen_addr: Option<SocketAddr>, + /// Listen address for the HTTP server which serves Prometheus metrics. + http_metrics_listen_addr: Option<SocketAddr>, websocket_listen_addr: Option<SocketAddr>, } @@ -33,9 +36,14 @@ impl<T: BeaconChainTypes> Client<T> { self.beacon_chain.clone() } - /// Returns the address of the client's HTTP API server, if it was started. - pub fn http_listen_addr(&self) -> Option<SocketAddr> { - self.http_listen_addr + /// Returns the address of the client's standard eth2.0 API server, if it was started. + pub fn http_api_listen_addr(&self) -> Option<SocketAddr> { + self.http_api_listen_addr + } + + /// Returns the address of the client's HTTP Prometheus metrics server, if it was started. + pub fn http_metrics_listen_addr(&self) -> Option<SocketAddr> { + self.http_metrics_listen_addr } /// Returns the address of the client's WebSocket API server, if it was started. diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 6dffdaa7c6d..e8f7d23a026 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -39,19 +39,34 @@ pub enum Eth1NetworkId { Custom(u64), } +impl Into<u64> for Eth1NetworkId { + fn into(self) -> u64 { + match self { + Eth1NetworkId::Mainnet => 1, + Eth1NetworkId::Goerli => 5, + Eth1NetworkId::Custom(id) => id, + } + } +} + +impl From<u64> for Eth1NetworkId { + fn from(id: u64) -> Self { + let into = |x: Eth1NetworkId| -> u64 { x.into() }; + match id { + id if id == into(Eth1NetworkId::Mainnet) => Eth1NetworkId::Mainnet, + id if id == into(Eth1NetworkId::Goerli) => Eth1NetworkId::Goerli, + id => Eth1NetworkId::Custom(id), + } + } +} + impl FromStr for Eth1NetworkId { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { - match s { - "1" => Ok(Eth1NetworkId::Mainnet), - "5" => Ok(Eth1NetworkId::Goerli), - custom => { - let network_id = u64::from_str_radix(custom, 10) - .map_err(|e| format!("Failed to parse 
eth1 network id {}", e))?; - Ok(Eth1NetworkId::Custom(network_id)) - } - } + u64::from_str_radix(s, 10) + .map(Into::into) + .map_err(|e| format!("Failed to parse eth1 network id {}", e)) } } diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index f5f018bd17b..a7aba85a28a 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -13,4 +13,6 @@ pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; pub use deposit_log::DepositLog; pub use inner::SszEth1Cache; -pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service}; +pub use service::{ + BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_NETWORK_ID, +}; diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/http_api/Cargo.toml similarity index 51% rename from beacon_node/rest_api/Cargo.toml rename to beacon_node/http_api/Cargo.toml index 38a5a1e7d55..828d26deb3d 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -1,50 +1,34 @@ [package] -name = "rest_api" -version = "0.2.0" -authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@sigmaprime.io>"] +name = "http_api" +version = "0.1.0" +authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + [dependencies] -bls = { path = "../../crypto/bls" } -rest_types = { path = "../../common/rest_types" } +warp = "0.2.5" +serde = { version = "1.0.110", features = ["derive"] } +tokio = { version = "0.2.21", features = ["sync"] } +parking_lot = "0.11.0" +types = { path = "../../consensus/types" } +hex = "0.4.2" beacon_chain = { path = "../beacon_chain" } +eth2 = { path = "../../common/eth2", features = ["lighthouse"] } +slog = "2.5.2" network = { path = "../network" } eth2_libp2p = { path = "../eth2_libp2p" } -store = { path = "../store" } -serde = { 
version = "1.0.110", features = ["derive"] } -serde_json = "1.0.52" -serde_yaml = "0.8.11" -slog = "2.5.2" -slog-term = "2.5.0" -slog-async = "2.5.0" -eth2_ssz = "0.1.2" -eth2_ssz_derive = "0.1.0" +eth1 = { path = "../eth1" } +fork_choice = { path = "../../consensus/fork_choice" } state_processing = { path = "../../consensus/state_processing" } -types = { path = "../../consensus/types" } -http = "0.2.1" -hyper = "0.13.5" -tokio = { version = "0.2.21", features = ["sync"] } -url = "2.1.1" -lazy_static = "1.4.0" -eth2_config = { path = "../../common/eth2_config" } +lighthouse_version = { path = "../../common/lighthouse_version" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +lazy_static = "1.4.0" +warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -hex = "0.4.2" -parking_lot = "0.11.0" -futures = "0.3.5" -operation_pool = { path = "../operation_pool" } -environment = { path = "../../lighthouse/environment" } -uhttp_sse = "0.5.1" -bus = "2.2.3" -itertools = "0.9.0" -lighthouse_version = { path = "../../common/lighthouse_version" } [dev-dependencies] -assert_matches = "1.3.0" -remote_beacon_node = { path = "../../common/remote_beacon_node" } -node_test_rig = { path = "../../testing/node_test_rig" } -tree_hash = "0.1.0" - -[features] -fake_crypto = [] +store = { path = "../store" } +environment = { path = "../../lighthouse/environment" } +tree_hash = { path = "../../consensus/tree_hash" } +discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] } diff --git a/beacon_node/http_api/src/beacon_proposer_cache.rs b/beacon_node/http_api/src/beacon_proposer_cache.rs new file mode 100644 index 00000000000..b062119e578 --- /dev/null +++ b/beacon_node/http_api/src/beacon_proposer_cache.rs @@ -0,0 +1,185 @@ +use crate::metrics; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::types::ProposerData; +use fork_choice::ProtoBlock; +use slot_clock::SlotClock; +use 
state_processing::per_slot_processing; +use types::{BeaconState, Epoch, EthSpec, Hash256, PublicKeyBytes}; + +/// This sets a maximum bound on the number of epochs to skip whilst instantiating the cache for +/// the first time. +const EPOCHS_TO_SKIP: u64 = 2; + +/// Caches the beacon block proposers for a given `epoch` and `epoch_boundary_root`. +/// +/// This cache is only able to contain a single set of proposers and is only +/// intended to cache the proposers for the current epoch according to the head +/// of the chain. A change in epoch or re-org to a different chain may cause a +/// cache miss and rebuild. +pub struct BeaconProposerCache { + epoch: Epoch, + decision_block_root: Hash256, + proposers: Vec<ProposerData>, +} + +impl BeaconProposerCache { + /// Create a new cache for the current epoch of the `chain`. + pub fn new<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> Result<Self, BeaconChainError> { + let head_root = chain.head_beacon_block_root()?; + let head_block = chain + .fork_choice + .read() + .get_block(&head_root) + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root))?; + + // If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at + // the epoch of the head. This prevents doing a massive amount of skip slots when starting + // a new database from genesis. + let epoch = { + let epoch_now = chain + .epoch() + .unwrap_or_else(|_| chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch())); + let head_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch()); + if epoch_now > head_epoch + EPOCHS_TO_SKIP { + head_epoch + } else { + epoch_now + } + }; + + Self::for_head_block(chain, epoch, head_root, head_block) + } + + /// Create a new cache that contains the shuffling for `current_epoch`, + /// assuming that `head_root` and `head_block` represents the most recent + /// canonical block. 
+ fn for_head_block<T: BeaconChainTypes>( + chain: &BeaconChain<T>, + current_epoch: Epoch, + head_root: Hash256, + head_block: ProtoBlock, + ) -> Result<Self, BeaconChainError> { + let _timer = metrics::start_timer(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_TIMES); + + let mut head_state = chain + .get_state(&head_block.state_root, Some(head_block.slot))? + .ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?; + + let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?; + + // We *must* skip forward to the current epoch to obtain valid proposer + // duties. We cannot skip to the previous epoch, like we do with + // attester duties. + while head_state.current_epoch() < current_epoch { + // Skip slots until the current epoch, providing `Hash256::zero()` as the state root + // since we don't require it to be valid to identify producers. + per_slot_processing(&mut head_state, Some(Hash256::zero()), &chain.spec)?; + } + + let proposers = current_epoch + .slot_iter(T::EthSpec::slots_per_epoch()) + .map(|slot| { + head_state + .get_beacon_proposer_index(slot, &chain.spec) + .map_err(BeaconChainError::from) + .and_then(|i| { + let pubkey = chain + .validator_pubkey(i)? + .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?; + + Ok(ProposerData { + pubkey: PublicKeyBytes::from(pubkey), + slot, + }) + }) + }) + .collect::<Result<_, _>>()?; + + Ok(Self { + epoch: current_epoch, + decision_block_root, + proposers, + }) + } + + /// Returns a block root which can be used to key the shuffling obtained from the following + /// parameters: + /// + /// - `shuffling_epoch`: the epoch for which the shuffling pertains. + /// - `head_block_root`: the block root at the head of the chain. + /// - `head_block_state`: the state of `head_block_root`. 
+ pub fn decision_block_root<E: EthSpec>( + shuffling_epoch: Epoch, + head_block_root: Hash256, + head_block_state: &BeaconState<E>, + ) -> Result<Hash256, BeaconChainError> { + let decision_slot = shuffling_epoch + .start_slot(E::slots_per_epoch()) + .saturating_sub(1_u64); + + // If decision slot is equal to or ahead of the head, the block root is the head block root + if decision_slot >= head_block_state.slot { + Ok(head_block_root) + } else { + head_block_state + .get_block_root(decision_slot) + .map(|root| *root) + .map_err(Into::into) + } + } + + /// Return the proposers for the given `Epoch`. + /// + /// The cache may be rebuilt if: + /// + /// - The epoch has changed since the last cache build. + /// - There has been a re-org that crosses an epoch boundary. + pub fn get_proposers<T: BeaconChainTypes>( + &mut self, + chain: &BeaconChain<T>, + epoch: Epoch, + ) -> Result<Vec<ProposerData>, warp::Rejection> { + let current_epoch = chain + .slot_clock + .now_or_genesis() + .ok_or_else(|| { + warp_utils::reject::custom_server_error("unable to read slot clock".to_string()) + })? + .epoch(T::EthSpec::slots_per_epoch()); + + // Disallow requests that are outside the current epoch. This ensures the cache doesn't get + // washed-out with old values. 
+ if current_epoch != epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "requested epoch is {} but only current epoch {} is allowed", + epoch, current_epoch + ))); + } + + let (head_block_root, head_decision_block_root) = chain + .with_head(|head| { + Self::decision_block_root(current_epoch, head.beacon_block_root, &head.beacon_state) + .map(|decision_root| (head.beacon_block_root, decision_root)) + }) + .map_err(warp_utils::reject::beacon_chain_error)?; + + let head_block = chain + .fork_choice + .read() + .get_block(&head_block_root) + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_block_root)) + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Rebuild the cache if this call causes a cache-miss. + if self.epoch != current_epoch || self.decision_block_root != head_decision_block_root { + metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL); + + *self = Self::for_head_block(chain, current_epoch, head_block_root, head_block) + .map_err(warp_utils::reject::beacon_chain_error)?; + } else { + metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL); + } + + Ok(self.proposers.clone()) + } +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs new file mode 100644 index 00000000000..5e358a2d683 --- /dev/null +++ b/beacon_node/http_api/src/block_id.rs @@ -0,0 +1,87 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::BlockId as CoreBlockId; +use std::str::FromStr; +use types::{Hash256, SignedBeaconBlock, Slot}; + +/// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given +/// `BlockId`. +#[derive(Debug)] +pub struct BlockId(pub CoreBlockId); + +impl BlockId { + pub fn from_slot(slot: Slot) -> Self { + Self(CoreBlockId::Slot(slot)) + } + + pub fn from_root(root: Hash256) -> Self { + Self(CoreBlockId::Root(root)) + } + + /// Return the block root identified by `self`. 
+ pub fn root<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<Hash256, warp::Rejection> { + match &self.0 { + CoreBlockId::Head => chain + .head_info() + .map(|head| head.block_root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Genesis => Ok(chain.genesis_block_root), + CoreBlockId::Finalized => chain + .head_info() + .map(|head| head.finalized_checkpoint.root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Justified => chain + .head_info() + .map(|head| head.current_justified_checkpoint.root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Slot(slot) => chain + .block_root_at_slot(*slot) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block at slot {}", + slot + )) + }) + }), + CoreBlockId::Root(root) => Ok(*root), + } + } + + /// Return the `SignedBeaconBlock` identified by `self`. + pub fn block<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<SignedBeaconBlock<T::EthSpec>, warp::Rejection> { + match &self.0 { + CoreBlockId::Head => chain + .head_beacon_block() + .map_err(warp_utils::reject::beacon_chain_error), + _ => { + let root = self.root(chain)?; + chain + .get_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + }) + } + } + } +} + +impl FromStr for BlockId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + CoreBlockId::from_str(s).map(Self) + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs new file mode 100644 index 00000000000..b23937b5df1 --- /dev/null +++ b/beacon_node/http_api/src/lib.rs @@ -0,0 +1,1749 @@ +//! This crate contains a HTTP server which serves the endpoints listed here: +//! +//! 
https://github.com/ethereum/eth2.0-APIs +//! +//! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are +//! used for development. + +mod beacon_proposer_cache; +mod block_id; +mod metrics; +mod state_id; +mod validator_inclusion; + +use beacon_chain::{ + observed_operations::ObservationOutcome, AttestationError as AttnError, BeaconChain, + BeaconChainError, BeaconChainTypes, +}; +use beacon_proposer_cache::BeaconProposerCache; +use block_id::BlockId; +use eth2::{ + types::{self as api_types, ValidatorId}, + StatusCode, +}; +use eth2_libp2p::{types::SyncState, NetworkGlobals, PubsubMessage}; +use lighthouse_version::version_with_platform; +use network::NetworkMessage; +use parking_lot::Mutex; +use serde::{Deserialize, Serialize}; +use slog::{crit, error, info, trace, warn, Logger}; +use slot_clock::SlotClock; +use state_id::StateId; +use state_processing::per_slot_processing; +use std::borrow::Cow; +use std::convert::TryInto; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use types::{ + Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec, + Hash256, ProposerSlashing, PublicKey, RelativeEpoch, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, +}; +use warp::Filter; + +const API_PREFIX: &str = "eth"; +const API_VERSION: &str = "v1"; + +/// If the node is within this many epochs from the head, we declare it to be synced regardless of +/// the network sync state. +/// +/// This helps prevent attacks where nodes can convince us that we're syncing some non-existent +/// finalized head. +const SYNC_TOLERANCE_EPOCHS: u64 = 8; + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. 
+pub struct Context<T: BeaconChainTypes> { + pub config: Config, + pub chain: Option<Arc<BeaconChain<T>>>, + pub network_tx: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, + pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, + pub log: Logger, +} + +/// Configuration for the HTTP server. +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + pub listen_addr: Ipv4Addr, + pub listen_port: u16, + pub allow_origin: Option<String>, +} + +impl Default for Config { + fn default() -> Self { + Self { + enabled: false, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 5052, + allow_origin: None, + } + } +} + +#[derive(Debug)] +pub enum Error { + Warp(warp::Error), + Other(String), +} + +impl From<warp::Error> for Error { + fn from(e: warp::Error) -> Self { + Error::Warp(e) + } +} + +impl From<String> for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} + +/// Creates a `warp` logging wrapper which we use to create `slog` logs. +pub fn slog_logging( + log: Logger, +) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> { + warp::log::custom(move |info| { + match info.status() { + status if status == StatusCode::OK || status == StatusCode::NOT_FOUND => { + trace!( + log, + "Processed HTTP API request"; + "elapsed" => format!("{:?}", info.elapsed()), + "status" => status.to_string(), + "path" => info.path(), + "method" => info.method().to_string(), + ); + } + status => { + warn!( + log, + "Error processing HTTP API request"; + "elapsed" => format!("{:?}", info.elapsed()), + "status" => status.to_string(), + "path" => info.path(), + "method" => info.method().to_string(), + ); + } + }; + }) +} + +/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging, +/// per say). 
+pub fn prometheus_metrics() -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> { + warp::log::custom(move |info| { + // Here we restrict the `info.path()` value to some predefined values. Without this, we end + // up with a new metric type each time someone includes something unique in the path (e.g., + // a block hash). + let path = { + let equals = |s: &'static str| -> Option<&'static str> { + if info.path() == format!("/{}/{}/{}", API_PREFIX, API_VERSION, s) { + Some(s) + } else { + None + } + }; + + let starts_with = |s: &'static str| -> Option<&'static str> { + if info + .path() + .starts_with(&format!("/{}/{}/{}", API_PREFIX, API_VERSION, s)) + { + Some(s) + } else { + None + } + }; + + equals("beacon/blocks") + .or_else(|| starts_with("validator/duties/attester")) + .or_else(|| starts_with("validator/duties/proposer")) + .or_else(|| starts_with("validator/attestation_data")) + .or_else(|| starts_with("validator/blocks")) + .or_else(|| starts_with("validator/aggregate_attestation")) + .or_else(|| starts_with("validator/aggregate_and_proofs")) + .or_else(|| starts_with("validator/beacon_committee_subscriptions")) + .or_else(|| starts_with("beacon/")) + .or_else(|| starts_with("config/")) + .or_else(|| starts_with("debug/")) + .or_else(|| starts_with("events/")) + .or_else(|| starts_with("node/")) + .or_else(|| starts_with("validator/")) + .unwrap_or("other") + }; + + metrics::inc_counter_vec(&metrics::HTTP_API_PATHS_TOTAL, &[path]); + metrics::inc_counter_vec( + &metrics::HTTP_API_STATUS_CODES_TOTAL, + &[&info.status().to_string()], + ); + metrics::observe_timer_vec(&metrics::HTTP_API_PATHS_TIMES, &[path], info.elapsed()); + }) +} + +/// Creates a server that will serve requests using information from `ctx`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. 
+/// +/// ## Returns +/// +/// This function will bind the server to the provided address and then return a tuple of: +/// +/// - `SocketAddr`: the address that the HTTP server will listen on. +/// - `Future`: the actual server future that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. +pub fn serve<T: BeaconChainTypes>( + ctx: Arc<Context<T>>, + shutdown: impl Future<Output = ()> + Send + Sync + 'static, +) -> Result<(SocketAddr, impl Future<Output = ()>), Error> { + let config = ctx.config.clone(); + let log = ctx.log.clone(); + let allow_origin = config.allow_origin.clone(); + + // Sanity check. + if !config.enabled { + crit!(log, "Cannot start disabled HTTP server"); + return Err(Error::Other( + "A disabled server should not be started".to_string(), + )); + } + + let eth1_v1 = warp::path(API_PREFIX).and(warp::path(API_VERSION)); + + // Instantiate the beacon proposer cache. + let beacon_proposer_cache = ctx + .chain + .as_ref() + .map(|chain| BeaconProposerCache::new(&chain)) + .transpose() + .map_err(|e| format!("Unable to initialize beacon proposer cache: {:?}", e))? + .map(Mutex::new) + .map(Arc::new); + + // Create a `warp` filter that provides access to the proposer cache. + let beacon_proposer_cache = || { + warp::any() + .map(move || beacon_proposer_cache.clone()) + .and_then(|beacon_proposer_cache| async move { + match beacon_proposer_cache { + Some(cache) => Ok(cache), + None => Err(warp_utils::reject::custom_not_found( + "Beacon proposer cache is not initialized.".to_string(), + )), + } + }) + }; + + // Create a `warp` filter that provides access to the network globals. 
+ let inner_network_globals = ctx.network_globals.clone(); + let network_globals = warp::any() + .map(move || inner_network_globals.clone()) + .and_then(|network_globals| async move { + match network_globals { + Some(globals) => Ok(globals), + None => Err(warp_utils::reject::custom_not_found( + "network globals are not initialized.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the beacon chain. + let inner_ctx = ctx.clone(); + let chain_filter = + warp::any() + .map(move || inner_ctx.chain.clone()) + .and_then(|chain| async move { + match chain { + Some(chain) => Ok(chain), + None => Err(warp_utils::reject::custom_not_found( + "Beacon chain genesis has not yet been observed.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the network sender channel. + let inner_ctx = ctx.clone(); + let network_tx_filter = warp::any() + .map(move || inner_ctx.network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started.".to_string(), + )), + } + }); + + // Create a `warp` filter that rejects request whilst the node is syncing. + let not_while_syncing_filter = warp::any() + .and(network_globals.clone()) + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| async move { + match *network_globals.sync_state.read() { + SyncState::SyncingFinalized { head_slot, .. 
} => { + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ) + })?; + + let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch(); + + if head_slot + tolerance >= current_slot { + Ok(()) + } else { + Err(warp_utils::reject::not_synced(format!( + "head slot is {}, current slot is {}", + head_slot, current_slot + ))) + } + } + SyncState::SyncingHead { .. } => Ok(()), + SyncState::Synced => Ok(()), + SyncState::Stalled => Err(warp_utils::reject::not_synced( + "sync is stalled".to_string(), + )), + } + }, + ) + .untuple_one(); + + // Create a `warp` filter that provides access to the logger. + let log_filter = warp::any().map(move || ctx.log.clone()); + + /* + * + * Start of HTTP method definitions. + * + */ + + // GET beacon/genesis + let get_beacon_genesis = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("genesis")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + chain + .head_info() + .map_err(warp_utils::reject::beacon_chain_error) + .map(|head| api_types::GenesisData { + genesis_time: head.genesis_time, + genesis_validators_root: head.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }) + .map(api_types::GenericResponse::from) + }) + }); + + /* + * beacon/states/{state_id} + */ + + let beacon_states_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::<StateId>()) + .and(chain_filter.clone()); + + // GET beacon/states/{state_id}/root + let get_beacon_state_root = beacon_states_path + .clone() + .and(warp::path("root")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + state_id + .root(&chain) + .map(api_types::RootData::from) + .map(api_types::GenericResponse::from) + }) + }); + + // GET 
beacon/states/{state_id}/fork + let get_beacon_state_fork = beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from)) + }); + + // GET beacon/states/{state_id}/finality_checkpoints + let get_beacon_state_finality_checkpoints = beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + Ok(api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint, + current_justified: state.current_justified_checkpoint, + finalized: state.finalized_checkpoint, + }) + }) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/validators + let get_beacon_state_validators = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = chain.spec.far_future_epoch; + + Ok(state + .validators + .iter() + .zip(state.balances.iter()) + .enumerate() + .map(|(index, (validator, balance))| api_types::ValidatorData { + index: index as u64, + balance: *balance, + status: api_types::ValidatorStatus::from_validator( + Some(validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + .collect::<Vec<_>>()) + }) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/validators/{validator_id} + let get_beacon_state_validators_id = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::param::<ValidatorId>()) + 
.and(warp::path::end()) + .and_then( + |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => { + state.validators.iter().position(|v| v.pubkey == *pubkey) + } + ValidatorId::Index(index) => Some(*index as usize), + }; + + index_opt + .and_then(|index| { + let validator = state.validators.get(index)?; + let balance = *state.balances.get(index)?; + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = chain.spec.far_future_epoch; + + Some(api_types::ValidatorData { + index: index as u64, + balance, + status: api_types::ValidatorStatus::from_validator( + Some(validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + }) + }) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET beacon/states/{state_id}/committees/{epoch} + let get_beacon_state_committees = beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::path::param::<Epoch>()) + .and(warp::query::<api_types::CommitteesQuery>()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, + chain: Arc<BeaconChain<T>>, + epoch: Epoch, + query: api_types::CommitteesQuery| { + blocking_json_task(move || { + state_id.map_state(&chain, |state| { + let relative_epoch = + RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( + |_| { + warp_utils::reject::custom_bad_request(format!( + "state is epoch {} and only previous, current and next epochs are supported", + state.current_epoch() + )) + }, + )?; + + let committee_cache = if state + .committee_cache_is_initialized(relative_epoch) + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } else { + 
CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned) + } + .map_err(BeaconChainError::BeaconStateError) + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Use either the supplied slot or all slots in the epoch. + let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "{} is not in epoch {}", + slot, epoch + ))); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(api_types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok(api_types::GenericResponse::from(response)) + }) + }) + }, + ); + + // GET beacon/headers + // + // Note: this endpoint only returns information about blocks in the canonical chain. Given that + // there's a `canonical` flag on the response, I assume it should also return non-canonical + // things. Returning non-canonical things is hard for us since we don't already have a + // mechanism for arbitrary forwards block iteration, we only support iterating forwards along + // the canonical chain. 
+ let get_beacon_headers = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::query::<api_types::HeadersQuery>()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::HeadersQuery, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let (root, block) = match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => chain + .head_beacon_block() + .map_err(warp_utils::reject::beacon_chain_error) + .map(|block| (block.canonical_root(), block))?, + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let parent = BlockId::from_root(parent_root).block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; + + BlockId::from_root(root) + .block(&chain) + .map(|block| (root, block))? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let root = BlockId::from_slot(slot).root(&chain)?; + let block = BlockId::from_root(root).block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. 
+ if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } + + (root, block) + } + }; + + let data = api_types::BlockHeaderData { + root, + canonical: true, + header: api_types::BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + + Ok(api_types::GenericResponse::from(vec![data])) + }) + }, + ); + + // GET beacon/headers/{block_id} + let get_beacon_headers_block_id = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::path::param::<BlockId>()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let root = block_id.root(&chain)?; + let block = BlockId::from_root(root).block(&chain)?; + + let canonical = chain + .block_root_at_slot(block.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + .map_or(false, |canonical| root == canonical); + + let data = api_types::BlockHeaderData { + root, + canonical, + header: api_types::BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + + Ok(api_types::GenericResponse::from(data)) + }) + }); + + /* + * beacon/blocks + */ + + // POST beacon/blocks/{block_id} + let post_beacon_blocks = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block: SignedBeaconBlock<T::EthSpec>, + chain: Arc<BeaconChain<T>>, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, + log: Logger| { + blocking_json_task(move || { + // Send the block, regardless of whether or not it is valid. 
The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(Box::new(block.clone())), + )?; + + match chain.process_block(block.clone()) { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "root" => format!("{}", root) + ); + + // Update the head since it's likely this block will become the new + // head. + chain + .fork_choice() + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } + }) + }, + ); + + let beacon_blocks_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::param::<BlockId>()) + .and(chain_filter.clone()); + + // GET beacon/blocks/{block_id} + let get_beacon_block = beacon_blocks_path.clone().and(warp::path::end()).and_then( + |block_id: BlockId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || block_id.block(&chain).map(api_types::GenericResponse::from)) + }, + ); + + // GET beacon/blocks/{block_id}/root + let get_beacon_block_root = beacon_blocks_path + .clone() + .and(warp::path("root")) + .and(warp::path::end()) + .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + block_id + .root(&chain) + .map(api_types::RootData::from) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/blocks/{block_id}/attestations + let get_beacon_block_attestations = beacon_blocks_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + block_id + .block(&chain) + .map(|block| block.message.body.attestations) + .map(api_types::GenericResponse::from) + }) + }); + + /* + * beacon/pool + */ + + let beacon_pool_path = eth1_v1 + .and(warp::path("beacon")) + 
.and(warp::path("pool")) + .and(chain_filter.clone()); + + // POST beacon/pool/attestations + let post_beacon_pool_attestations = beacon_pool_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc<BeaconChain<T>>, + attestation: Attestation<T::EthSpec>, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| { + blocking_json_task(move || { + let attestation = chain + .verify_unaggregated_attestation_for_gossip(attestation.clone(), None) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + publish_pubsub_message( + &network_tx, + PubsubMessage::Attestation(Box::new(( + attestation.subnet_id(), + attestation.attestation().clone(), + ))), + )?; + + chain + .apply_attestation_to_fork_choice(&attestation) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to fork choice: {:?}", + e + )) + })?; + + chain + .add_to_naive_aggregation_pool(attestation) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to naive aggregation pool: {:?}", + e + )) + })?; + + Ok(()) + }) + }, + ); + + // GET beacon/pool/attestations + let get_beacon_pool_attestations = beacon_pool_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let mut attestations = chain.op_pool.get_all_attestations(); + attestations.extend(chain.naive_aggregation_pool.read().iter().cloned()); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/attester_slashings + let post_beacon_pool_attester_slashings = beacon_pool_path + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc<BeaconChain<T>>, + slashing: 
AttesterSlashing<T::EthSpec>, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| { + blocking_json_task(move || { + let outcome = chain + .verify_attester_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(slashing) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::AttesterSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain + .import_attester_slashing(slashing) + .map_err(warp_utils::reject::beacon_chain_error)?; + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/attester_slashings + let get_beacon_pool_attester_slashings = beacon_pool_path + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_attester_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/proposer_slashings + let post_beacon_pool_proposer_slashings = beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc<BeaconChain<T>>, + slashing: ProposerSlashing, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| { + blocking_json_task(move || { + let outcome = chain + .verify_proposer_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(slashing) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::ProposerSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_proposer_slashing(slashing); + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/proposer_slashings + let get_beacon_pool_proposer_slashings = beacon_pool_path + .clone() 
+ .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/voluntary_exits + let post_beacon_pool_voluntary_exits = beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc<BeaconChain<T>>, + exit: SignedVoluntaryExit, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| { + blocking_json_task(move || { + let outcome = chain + .verify_voluntary_exit_for_gossip(exit.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(exit) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), + )?; + + chain.import_voluntary_exit(exit); + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/voluntary_exits + let get_beacon_pool_voluntary_exits = beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + /* + * config/fork_schedule + */ + + let config_path = eth1_v1.and(warp::path("config")); + + // GET config/fork_schedule + let get_config_fork_schedule = config_path + .clone() + .and(warp::path("fork_schedule")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + StateId::head() + .fork(&chain) + .map(|fork| api_types::GenericResponse::from(vec![fork])) + }) + }); + + // GET config/spec + let get_config_spec = config_path + .clone() + 
.and(warp::path("spec")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(YamlConfig::from_spec::< + T::EthSpec, + >( + &chain.spec + ))) + }) + }); + + // GET config/deposit_contract + let get_config_deposit_contract = config_path + .clone() + .and(warp::path("deposit_contract")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + api_types::DepositContractData { + address: chain.spec.deposit_contract_address, + chain_id: eth1::DEFAULT_NETWORK_ID.into(), + }, + )) + }) + }); + + /* + * debug + */ + + // GET debug/beacon/states/{state_id} + let get_debug_beacon_states = eth1_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::<StateId>()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| { + blocking_task(move || { + state_id.map_state(&chain, |state| { + Ok(warp::reply::json(&api_types::GenericResponseRef::from( + &state, + ))) + }) + }) + }); + + // GET debug/beacon/heads + let get_debug_beacon_heads = eth1_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("heads")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| api_types::ChainHeadData { root, slot }) + .collect::<Vec<_>>(); + Ok(api_types::GenericResponse::from(heads)) + }) + }); + + /* + * node + */ + + // GET node/identity + let get_node_identity = eth1_v1 + .and(warp::path("node")) + .and(warp::path("identity")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| { + blocking_json_task(move || { + 
Ok(api_types::GenericResponse::from(api_types::IdentityData { + peer_id: network_globals.local_peer_id().to_base58(), + enr: network_globals.local_enr(), + p2p_addresses: network_globals.listen_multiaddrs(), + })) + }) + }); + + // GET node/version + let get_node_version = eth1_v1 + .and(warp::path("node")) + .and(warp::path("version")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(api_types::VersionData { + version: version_with_platform(), + })) + }) + }); + + // GET node/syncing + let get_node_syncing = eth1_v1 + .and(warp::path("node")) + .and(warp::path("syncing")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let head_slot = chain + .head_info() + .map(|info| info.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Taking advantage of saturating subtraction on slot. 
+ let sync_distance = current_slot - head_slot; + + let syncing_data = api_types::SyncingData { + is_syncing: network_globals.sync_state.read().is_syncing(), + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) + }) + }, + ); + + /* + * validator + */ + + // GET validator/duties/attester/{epoch} + let get_validator_duties_attester = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("attester")) + .and(warp::path::param::<Epoch>()) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::<api_types::ValidatorDutiesQuery>()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, query: api_types::ValidatorDutiesQuery, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let current_epoch = chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)?; + + if epoch > current_epoch + 1 { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch past the current epoch {}", + epoch, current_epoch + ))); + } + + let validator_count = StateId::head() + .map_state(&chain, |state| Ok(state.validators.len() as u64))?; + + let indices = query + .index + .as_ref() + .map(|index| index.0.clone()) + .map(Result::Ok) + .unwrap_or_else(|| { + Ok::<_, warp::Rejection>((0..validator_count).collect()) + })?; + + let pubkeys = indices + .into_iter() + .filter(|i| *i < validator_count as u64) + .map(|i| { + let pubkey = chain + .validator_pubkey(i as usize) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "unknown validator index {}", + i + )) + })?; + + Ok((i, pubkey)) + }) + .collect::<Result<Vec<_>, warp::Rejection>>()?; + + // Converts the internal Lighthouse `AttestationDuty` struct into an + // API-conforming `AttesterData` struct. 
+ let convert = |validator_index: u64, + pubkey: PublicKey, + duty: AttestationDuty| + -> api_types::AttesterData { + api_types::AttesterData { + pubkey: pubkey.into(), + validator_index, + committees_at_slot: duty.committees_at_slot, + committee_index: duty.index, + committee_length: duty.committee_len as u64, + validator_committee_index: duty.committee_position as u64, + slot: duty.slot, + } + }; + + // Here we have two paths: + // + // ## Fast + // + // If the request epoch is the current epoch, use the cached beacon chain + // method. + // + // ## Slow + // + // If the request epoch is prior to the current epoch, load a beacon state from + // disk + // + // The idea is to stop historical requests from washing out the cache on the + // beacon chain, whilst allowing a VC to request duties quickly. + let duties = if epoch == current_epoch { + // Fast path. + pubkeys + .into_iter() + // Exclude indices which do not represent a known public key and a + // validator duty. + .filter_map(|(i, pubkey)| { + Some( + chain + .validator_attestation_duty(i as usize, epoch) + .transpose()? + .map_err(warp_utils::reject::beacon_chain_error) + .map(|duty| convert(i, pubkey, duty)), + ) + }) + .collect::<Result<Vec<_>, warp::Rejection>>()? + } else { + // If the head state is equal to or earlier than the request epoch, use it. + let mut state = chain + .with_head(|head| { + if head.beacon_state.current_epoch() <= epoch { + Ok(Some( + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + )) + } else { + Ok(None) + } + }) + .map_err(warp_utils::reject::beacon_chain_error)? + .map(Result::Ok) + .unwrap_or_else(|| { + StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(&chain) + })?; + + // Only skip forward to the epoch prior to the request, since we have a + // one-epoch look-ahead on shuffling. + while state + .next_epoch() + .map_err(warp_utils::reject::beacon_state_error)? 
+ < epoch + { + // Don't calculate state roots since they aren't required for calculating + // shuffling (achieved by providing Hash256::zero()). + per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) + .map_err(warp_utils::reject::slot_processing_error)?; + } + + let relative_epoch = + RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( + |e| { + warp_utils::reject::custom_server_error(format!( + "unable to obtain suitable state: {:?}", + e + )) + }, + )?; + + state + .build_committee_cache(relative_epoch, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + pubkeys + .into_iter() + .filter_map(|(i, pubkey)| { + Some( + state + .get_attestation_duties(i as usize, relative_epoch) + .transpose()? + .map_err(warp_utils::reject::beacon_state_error) + .map(|duty| convert(i, pubkey, duty)), + ) + }) + .collect::<Result<Vec<_>, warp::Rejection>>()? + }; + + Ok(api_types::GenericResponse::from(duties)) + }) + }, + ); + + // GET validator/duties/proposer/{epoch} + let get_validator_duties_proposer = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("proposer")) + .and(warp::path::param::<Epoch>()) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and(beacon_proposer_cache()) + .and_then( + |epoch: Epoch, + chain: Arc<BeaconChain<T>>, + beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>| { + blocking_json_task(move || { + beacon_proposer_cache + .lock() + .get_proposers(&chain, epoch) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET validator/blocks/{slot} + let get_validator_blocks = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("blocks")) + .and(warp::path::param::<Slot>()) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::<api_types::ValidatorBlocksQuery>()) + .and(chain_filter.clone()) + .and_then( + |slot: Slot, query: api_types::ValidatorBlocksQuery, chain: 
Arc<BeaconChain<T>>| { + blocking_json_task(move || { + let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not valid BLS signature: {:?}", + e + )) + })?; + + chain + .produce_block(randao_reveal, slot, query.graffiti.map(Into::into)) + .map(|block_and_state| block_and_state.0) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::block_production_error) + }) + }, + ); + + // GET validator/attestation_data?slot,committee_index + let get_validator_attestation_data = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("attestation_data")) + .and(warp::path::end()) + .and(warp::query::<api_types::ValidatorAttestationDataQuery>()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + chain + .produce_unaggregated_attestation(query.slot, query.committee_index) + .map(|attestation| attestation.data) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::beacon_chain_error) + }) + }, + ); + + // GET validator/aggregate_attestation?attestation_data_root,slot + let get_validator_aggregate_attestation = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("aggregate_attestation")) + .and(warp::path::end()) + .and(warp::query::<api_types::ValidatorAggregateAttestationQuery>()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + chain + .get_aggregated_attestation_by_slot_and_root( + query.slot, + &query.attestation_data_root, + ) + .map(api_types::GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching aggregate found".to_string(), + ) + }) + }) + }, + ); + + // POST validator/aggregate_and_proofs + let 
post_validator_aggregate_and_proofs = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("aggregate_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter) + .and(chain_filter.clone()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc<BeaconChain<T>>, + aggregate: SignedAggregateAndProof<T::EthSpec>, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| { + blocking_json_task(move || { + let aggregate = + match chain.verify_aggregated_attestation_for_gossip(aggregate.clone()) { + Ok(aggregate) => aggregate, + // If we already know the attestation, don't broadcast it or attempt to + // further verify it. Return success. + // + // It's reasonably likely that two different validators produce + // identical aggregates, especially if they're using the same beacon + // node. + Err(AttnError::AttestationAlreadyKnown(_)) => return Ok(()), + Err(e) => { + return Err(warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + ))) + } + }; + + publish_pubsub_message( + &network_tx, + PubsubMessage::AggregateAndProofAttestation(Box::new( + aggregate.aggregate().clone(), + )), + )?; + + chain + .apply_attestation_to_fork_choice(&aggregate) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to fork choice: {:?}", + e + )) + })?; + + chain.add_to_block_inclusion_pool(aggregate).map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to block inclusion pool: {:?}", + e + )) + })?; + + Ok(()) + }) + }, + ); + + // POST validator/beacon_committee_subscriptions + let post_validator_beacon_committee_subscriptions = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("beacon_committee_subscriptions")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter) + .and_then( + |subscriptions: Vec<api_types::BeaconCommitteeSubscription>, + network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| { + 
blocking_json_task(move || { + for subscription in &subscriptions { + let subscription = api_types::ValidatorSubscription { + validator_index: subscription.validator_index, + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + }; + + publish_network_message( + &network_tx, + NetworkMessage::Subscribe { + subscriptions: vec![subscription], + }, + )?; + } + + Ok(()) + }) + }, + ); + + // GET lighthouse/health + let get_lighthouse_health = warp::path("lighthouse") + .and(warp::path("health")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + eth2::lighthouse::Health::observe() + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }); + + // GET lighthouse/syncing + let get_lighthouse_syncing = warp::path("lighthouse") + .and(warp::path("syncing")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + network_globals.sync_state(), + )) + }) + }); + + // GET lighthouse/peers + let get_lighthouse_peers = warp::path("lighthouse") + .and(warp::path("peers")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| { + blocking_json_task(move || { + Ok(network_globals + .peers + .read() + .peers() + .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect::<Vec<_>>()) + }) + }); + + // GET lighthouse/peers/connected + let get_lighthouse_peers_connected = warp::path("lighthouse") + .and(warp::path("peers")) + .and(warp::path("connected")) + .and(warp::path::end()) + .and(network_globals) + .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| { + blocking_json_task(move || { + 
Ok(network_globals + .peers + .read() + .connected_peers() + .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect::<Vec<_>>()) + }) + }); + + // GET lighthouse/proto_array + let get_lighthouse_proto_array = warp::path("lighthouse") + .and(warp::path("proto_array")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc<BeaconChain<T>>| { + blocking_task(move || { + Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( + chain.fork_choice.read().proto_array().core_proto_array(), + ))) + }) + }); + + // GET lighthouse/validator_inclusion/{epoch}/{validator_id} + let get_lighthouse_validator_inclusion_global = warp::path("lighthouse") + .and(warp::path("validator_inclusion")) + .and(warp::path::param::<Epoch>()) + .and(warp::path::param::<ValidatorId>()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, validator_id: ValidatorId, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + validator_inclusion::validator_inclusion_data(epoch, &validator_id, &chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET lighthouse/validator_inclusion/{epoch}/global + let get_lighthouse_validator_inclusion = warp::path("lighthouse") + .and(warp::path("validator_inclusion")) + .and(warp::path::param::<Epoch>()) + .and(warp::path("global")) + .and(warp::path::end()) + .and(chain_filter) + .and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>| { + blocking_json_task(move || { + validator_inclusion::global_validator_inclusion_data(epoch, &chain) + .map(api_types::GenericResponse::from) + }) + }); + + // Define the ultimate set of routes that will be provided to the server. 
+ let routes = warp::get() + .and( + get_beacon_genesis + .or(get_beacon_state_root.boxed()) + .or(get_beacon_state_fork.boxed()) + .or(get_beacon_state_finality_checkpoints.boxed()) + .or(get_beacon_state_validators.boxed()) + .or(get_beacon_state_validators_id.boxed()) + .or(get_beacon_state_committees.boxed()) + .or(get_beacon_headers.boxed()) + .or(get_beacon_headers_block_id.boxed()) + .or(get_beacon_block.boxed()) + .or(get_beacon_block_attestations.boxed()) + .or(get_beacon_block_root.boxed()) + .or(get_beacon_pool_attestations.boxed()) + .or(get_beacon_pool_attester_slashings.boxed()) + .or(get_beacon_pool_proposer_slashings.boxed()) + .or(get_beacon_pool_voluntary_exits.boxed()) + .or(get_config_fork_schedule.boxed()) + .or(get_config_spec.boxed()) + .or(get_config_deposit_contract.boxed()) + .or(get_debug_beacon_states.boxed()) + .or(get_debug_beacon_heads.boxed()) + .or(get_node_identity.boxed()) + .or(get_node_version.boxed()) + .or(get_node_syncing.boxed()) + .or(get_validator_duties_attester.boxed()) + .or(get_validator_duties_proposer.boxed()) + .or(get_validator_blocks.boxed()) + .or(get_validator_attestation_data.boxed()) + .or(get_validator_aggregate_attestation.boxed()) + .or(get_lighthouse_health.boxed()) + .or(get_lighthouse_syncing.boxed()) + .or(get_lighthouse_peers.boxed()) + .or(get_lighthouse_peers_connected.boxed()) + .or(get_lighthouse_proto_array.boxed()) + .or(get_lighthouse_validator_inclusion_global.boxed()) + .or(get_lighthouse_validator_inclusion.boxed()) + .boxed(), + ) + .or(warp::post() + .and( + post_beacon_blocks + .or(post_beacon_pool_attestations.boxed()) + .or(post_beacon_pool_attester_slashings.boxed()) + .or(post_beacon_pool_proposer_slashings.boxed()) + .or(post_beacon_pool_voluntary_exits.boxed()) + .or(post_validator_aggregate_and_proofs.boxed()) + .or(post_validator_beacon_committee_subscriptions.boxed()) + .boxed(), + ) + .boxed()) + .boxed() + // Maps errors into HTTP responses. 
+ .recover(warp_utils::reject::handle_rejection) + .with(slog_logging(log.clone())) + .with(prometheus_metrics()) + // Add a `Server` header. + .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) + // Maybe add some CORS headers. + .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref())); + + let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )?; + + info!( + log, + "HTTP API started"; + "listen_address" => listening_socket.to_string(), + ); + + Ok((listening_socket, server)) +} + +/// Publish a message to the libp2p pubsub network. +fn publish_pubsub_message<T: EthSpec>( + network_tx: &UnboundedSender<NetworkMessage<T>>, + message: PubsubMessage<T>, +) -> Result<(), warp::Rejection> { + publish_network_message( + network_tx, + NetworkMessage::Publish { + messages: vec![message], + }, + ) +} + +/// Publish a message to the libp2p network. +fn publish_network_message<T: EthSpec>( + network_tx: &UnboundedSender<NetworkMessage<T>>, + message: NetworkMessage<T>, +) -> Result<(), warp::Rejection> { + network_tx.send(message).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to publish to network channel: {}", + e + )) + }) +} + +/// Execute some task in a tokio "blocking thread". These threads are ideal for long-running +/// (blocking) tasks since they don't jam up the core executor. +async fn blocking_task<F, T>(func: F) -> T +where + F: Fn() -> T, +{ + tokio::task::block_in_place(func) +} + +/// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. 
+async fn blocking_json_task<F, T>(func: F) -> Result<warp::reply::Json, warp::Rejection> +where + F: Fn() -> Result<T, warp::Rejection>, + T: Serialize, +{ + blocking_task(func) + .await + .map(|resp| warp::reply::json(&resp)) +} diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs new file mode 100644 index 00000000000..c641df6a4a5 --- /dev/null +++ b/beacon_node/http_api/src/metrics.rs @@ -0,0 +1,32 @@ +pub use lighthouse_metrics::*; + +lazy_static::lazy_static! { + pub static ref HTTP_API_PATHS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec( + "http_api_paths_total", + "Count of HTTP requests received", + &["path"] + ); + pub static ref HTTP_API_STATUS_CODES_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec( + "http_api_status_codes_total", + "Count of HTTP status codes returned", + &["status"] + ); + pub static ref HTTP_API_PATHS_TIMES: Result<HistogramVec> = try_create_histogram_vec( + "http_api_paths_times", + "Duration to process HTTP requests per path", + &["path"] + ); + + pub static ref HTTP_API_BEACON_PROPOSER_CACHE_TIMES: Result<Histogram> = try_create_histogram( + "http_api_beacon_proposer_cache_build_times", + "Duration to process HTTP requests per path", + ); + pub static ref HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL: Result<IntCounter> = try_create_int_counter( + "http_api_beacon_proposer_cache_hits_total", + "Count of times the proposer cache has been hit", + ); + pub static ref HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL: Result<IntCounter> = try_create_int_counter( + "http_api_beacon_proposer_cache_misses_total", + "Count of times the proposer cache has been missed", + ); +} diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs new file mode 100644 index 00000000000..11800648f25 --- /dev/null +++ b/beacon_node/http_api/src/state_id.rs @@ -0,0 +1,118 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::StateId as CoreStateId; +use 
std::str::FromStr; +use types::{BeaconState, EthSpec, Fork, Hash256, Slot}; + +/// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading +/// states or parts of states from the database. +pub struct StateId(CoreStateId); + +impl StateId { + pub fn head() -> Self { + Self(CoreStateId::Head) + } + + pub fn slot(slot: Slot) -> Self { + Self(CoreStateId::Slot(slot)) + } + + /// Return the state root identified by `self`. + pub fn root<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<Hash256, warp::Rejection> { + let slot = match &self.0 { + CoreStateId::Head => { + return chain + .head_info() + .map(|head| head.state_root) + .map_err(warp_utils::reject::beacon_chain_error) + } + CoreStateId::Genesis => return Ok(chain.genesis_state_root), + CoreStateId::Finalized => chain.head_info().map(|head| { + head.finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + }), + CoreStateId::Justified => chain.head_info().map(|head| { + head.current_justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + }), + CoreStateId::Slot(slot) => Ok(*slot), + CoreStateId::Root(root) => return Ok(*root), + } + .map_err(warp_utils::reject::beacon_chain_error)?; + + chain + .state_root_at_slot(slot) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) + }) + } + + /// Return the `fork` field of the state identified by `self`. + pub fn fork<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<Fork, warp::Rejection> { + self.map_state(chain, |state| Ok(state.fork)) + } + + /// Return the `BeaconState` identified by `self`. 
+ pub fn state<T: BeaconChainTypes>( + &self, + chain: &BeaconChain<T>, + ) -> Result<BeaconState<T::EthSpec>, warp::Rejection> { + let (state_root, slot_opt) = match &self.0 { + CoreStateId::Head => { + return chain + .head_beacon_state() + .map_err(warp_utils::reject::beacon_chain_error) + } + CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), + _ => (self.root(chain)?, None), + }; + + chain + .get_state(&state_root, slot_opt) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|opt| { + opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon state at root {}", + state_root + )) + }) + }) + } + + /// Map a function across the `BeaconState` identified by `self`. + /// + /// This function will avoid instantiating/copying a new state when `self` points to the head + /// of the chain. + pub fn map_state<T: BeaconChainTypes, F, U>( + &self, + chain: &BeaconChain<T>, + func: F, + ) -> Result<U, warp::Rejection> + where + F: Fn(&BeaconState<T::EthSpec>) -> Result<U, warp::Rejection>, + { + match &self.0 { + CoreStateId::Head => chain + .with_head(|snapshot| Ok(func(&snapshot.beacon_state))) + .map_err(warp_utils::reject::beacon_chain_error)?, + _ => func(&self.state(chain)?), + } + } +} + +impl FromStr for StateId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + CoreStateId::from_str(s).map(Self) + } +} diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs new file mode 100644 index 00000000000..90847dd6b4e --- /dev/null +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -0,0 +1,88 @@ +use crate::state_id::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::{ + lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData}, + types::ValidatorId, +}; +use state_processing::per_epoch_processing::ValidatorStatuses; +use types::{Epoch, EthSpec}; + +/// Returns information about *all validators* (i.e., global) and how 
they performed during a given +/// epoch. +pub fn global_validator_inclusion_data<T: BeaconChainTypes>( + epoch: Epoch, + chain: &BeaconChain<T>, +) -> Result<GlobalValidatorInclusionData, warp::Rejection> { + let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); + + let state = StateId::slot(target_slot).state(chain)?; + + let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + validator_statuses + .process_attestations(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + + let totals = validator_statuses.total_balances; + + Ok(GlobalValidatorInclusionData { + current_epoch_active_gwei: totals.current_epoch(), + previous_epoch_active_gwei: totals.previous_epoch(), + current_epoch_attesting_gwei: totals.current_epoch_attesters(), + current_epoch_target_attesting_gwei: totals.current_epoch_target_attesters(), + previous_epoch_attesting_gwei: totals.previous_epoch_attesters(), + previous_epoch_target_attesting_gwei: totals.previous_epoch_target_attesters(), + previous_epoch_head_attesting_gwei: totals.previous_epoch_head_attesters(), + }) +} + +/// Returns information about a single validator and how it performed during a given epoch. 
+pub fn validator_inclusion_data<T: BeaconChainTypes>( + epoch: Epoch, + validator_id: &ValidatorId, + chain: &BeaconChain<T>, +) -> Result<Option<ValidatorInclusionData>, warp::Rejection> { + let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); + + let mut state = StateId::slot(target_slot).state(chain)?; + + let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + validator_statuses + .process_attestations(&state, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + + state + .update_pubkey_cache() + .map_err(warp_utils::reject::beacon_state_error)?; + + let validator_index = match validator_id { + ValidatorId::Index(index) => *index as usize, + ValidatorId::PublicKey(pubkey) => { + if let Some(index) = state + .get_validator_index(pubkey) + .map_err(warp_utils::reject::beacon_state_error)? + { + index + } else { + return Ok(None); + } + } + }; + + Ok(validator_statuses + .statuses + .get(validator_index) + .map(|vote| ValidatorInclusionData { + is_slashed: vote.is_slashed, + is_withdrawable_in_current_epoch: vote.is_withdrawable_in_current_epoch, + is_active_in_current_epoch: vote.is_active_in_current_epoch, + is_active_in_previous_epoch: vote.is_active_in_previous_epoch, + current_epoch_effective_balance_gwei: vote.current_epoch_effective_balance, + is_current_epoch_attester: vote.is_current_epoch_attester, + is_current_epoch_target_attester: vote.is_current_epoch_target_attester, + is_previous_epoch_attester: vote.is_previous_epoch_attester, + is_previous_epoch_target_attester: vote.is_previous_epoch_target_attester, + is_previous_epoch_head_attester: vote.is_previous_epoch_head_attester, + })) +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs new file mode 100644 index 00000000000..2a7e8f6d40a --- /dev/null +++ b/beacon_node/http_api/tests/tests.rs @@ -0,0 +1,1786 @@ +use beacon_chain::{ + test_utils::{ + 
AttestationStrategy, BeaconChainHarness, BlockStrategy, + BlockingMigratorEphemeralHarnessType, + }, + BeaconChain, StateSkipConfig, +}; +use discv5::enr::{CombinedKey, EnrBuilder}; +use environment::null_logger; +use eth2::{types::*, BeaconNodeHttpClient, Url}; +use eth2_libp2p::{ + rpc::methods::MetaData, + types::{EnrBitfield, SyncState}, + NetworkGlobals, +}; +use http_api::{Config, Context}; +use network::NetworkMessage; +use state_processing::per_slot_processing; +use std::convert::TryInto; +use std::net::Ipv4Addr; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tree_hash::TreeHash; +use types::{ + test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain, + EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, +}; + +type E = MainnetEthSpec; + +const SLOTS_PER_EPOCH: u64 = 32; +const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize; +const CHAIN_LENGTH: u64 = SLOTS_PER_EPOCH * 5; +const JUSTIFIED_EPOCH: u64 = 4; +const FINALIZED_EPOCH: u64 = 3; + +/// Skipping the slots around the epoch boundary allows us to check that we're obtaining states +/// from skipped slots for the finalized and justified checkpoints (instead of the state from the +/// block that those roots point to). 
+const SKIPPED_SLOTS: &[u64] = &[ + JUSTIFIED_EPOCH * SLOTS_PER_EPOCH - 1, + JUSTIFIED_EPOCH * SLOTS_PER_EPOCH, + FINALIZED_EPOCH * SLOTS_PER_EPOCH - 1, + FINALIZED_EPOCH * SLOTS_PER_EPOCH, +]; + +struct ApiTester { + chain: Arc<BeaconChain<BlockingMigratorEphemeralHarnessType<E>>>, + client: BeaconNodeHttpClient, + next_block: SignedBeaconBlock<E>, + attestations: Vec<Attestation<E>>, + attester_slashing: AttesterSlashing<E>, + proposer_slashing: ProposerSlashing, + voluntary_exit: SignedVoluntaryExit, + _server_shutdown: oneshot::Sender<()>, + validator_keypairs: Vec<Keypair>, + network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>, +} + +impl ApiTester { + pub fn new() -> Self { + let mut harness = BeaconChainHarness::new( + MainnetEthSpec, + generate_deterministic_keypairs(VALIDATOR_COUNT), + ); + + harness.advance_slot(); + + for _ in 0..CHAIN_LENGTH { + let slot = harness.chain.slot().unwrap().as_u64(); + + if !SKIPPED_SLOTS.contains(&slot) { + harness.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + } + + harness.advance_slot(); + } + + let head = harness.chain.head().unwrap(); + + assert_eq!( + harness.chain.slot().unwrap(), + head.beacon_block.slot() + 1, + "precondition: current slot is one after head" + ); + + let (next_block, _next_state) = + harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + + let attestations = harness + .get_unaggregated_attestations( + &AttestationStrategy::AllValidators, + &head.beacon_state, + head.beacon_block_root, + harness.chain.slot().unwrap(), + ) + .into_iter() + .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) + .flatten() + .collect::<Vec<_>>(); + + assert!( + !attestations.is_empty(), + "precondition: attestations for testing" + ); + + let attester_slashing = harness.make_attester_slashing(vec![0, 1]); + let proposer_slashing = harness.make_proposer_slashing(2); + let voluntary_exit = harness.make_voluntary_exit(3, 
harness.chain.epoch().unwrap()); + + // Changing this *after* the chain has been initialized is a bit cheeky, but it shouldn't + // cause issue. + // + // This allows for testing voluntary exits without building out a massive chain. + harness.chain.spec.shard_committee_period = 2; + + let chain = Arc::new(harness.chain); + + assert_eq!( + chain.head_info().unwrap().finalized_checkpoint.epoch, + 3, + "precondition: finality" + ); + assert_eq!( + chain + .head_info() + .unwrap() + .current_justified_checkpoint + .epoch, + 4, + "precondition: justification" + ); + + let (network_tx, network_rx) = mpsc::unbounded_channel(); + + let log = null_logger().unwrap(); + + // Default metadata + let meta_data = MetaData { + seq_number: 0, + attnets: EnrBitfield::<MinimalEthSpec>::default(), + }; + let enr_key = CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); + let network_globals = NetworkGlobals::new(enr, 42, 42, meta_data, vec![], &log); + + *network_globals.sync_state.write() = SyncState::Synced; + + let context = Arc::new(Context { + config: Config { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + }, + chain: Some(chain.clone()), + network_tx: Some(network_tx), + network_globals: Some(Arc::new(network_globals)), + log, + }); + let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. 
+ let _ = shutdown_rx.await; + }; + let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + + tokio::spawn(async { server.await }); + + let client = BeaconNodeHttpClient::new( + Url::parse(&format!( + "http://{}:{}", + listening_socket.ip(), + listening_socket.port() + )) + .unwrap(), + ); + + Self { + chain, + client, + next_block, + attestations, + attester_slashing, + proposer_slashing, + voluntary_exit, + _server_shutdown: shutdown_tx, + validator_keypairs: harness.validators_keypairs, + network_rx, + } + } + + fn skip_slots(self, count: u64) -> Self { + for _ in 0..count { + self.chain + .slot_clock + .set_slot(self.chain.slot().unwrap().as_u64() + 1); + } + + self + } + + fn interesting_state_ids(&self) -> Vec<StateId> { + let mut ids = vec![ + StateId::Head, + StateId::Genesis, + StateId::Finalized, + StateId::Justified, + StateId::Slot(Slot::new(0)), + StateId::Slot(Slot::new(32)), + StateId::Slot(Slot::from(SKIPPED_SLOTS[0])), + StateId::Slot(Slot::from(SKIPPED_SLOTS[1])), + StateId::Slot(Slot::from(SKIPPED_SLOTS[2])), + StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), + StateId::Root(Hash256::zero()), + ]; + ids.push(StateId::Root(self.chain.head_info().unwrap().state_root)); + ids + } + + fn interesting_block_ids(&self) -> Vec<BlockId> { + let mut ids = vec![ + BlockId::Head, + BlockId::Genesis, + BlockId::Finalized, + BlockId::Justified, + BlockId::Slot(Slot::new(0)), + BlockId::Slot(Slot::new(32)), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[0])), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[1])), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[2])), + BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), + BlockId::Root(Hash256::zero()), + ]; + ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root)); + ids + } + + fn get_state(&self, state_id: StateId) -> Option<BeaconState<E>> { + match state_id { + StateId::Head => Some(self.chain.head().unwrap().beacon_state), + StateId::Genesis => self + .chain + 
.get_state(&self.chain.genesis_state_root, None) + .unwrap(), + StateId::Finalized => { + let finalized_slot = self + .chain + .head_info() + .unwrap() + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let root = self + .chain + .state_root_at_slot(finalized_slot) + .unwrap() + .unwrap(); + + self.chain.get_state(&root, Some(finalized_slot)).unwrap() + } + StateId::Justified => { + let justified_slot = self + .chain + .head_info() + .unwrap() + .current_justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + let root = self + .chain + .state_root_at_slot(justified_slot) + .unwrap() + .unwrap(); + + self.chain.get_state(&root, Some(justified_slot)).unwrap() + } + StateId::Slot(slot) => { + let root = self.chain.state_root_at_slot(slot).unwrap().unwrap(); + + self.chain.get_state(&root, Some(slot)).unwrap() + } + StateId::Root(root) => self.chain.get_state(&root, None).unwrap(), + } + } + + pub async fn test_beacon_genesis(self) -> Self { + let result = self.client.get_beacon_genesis().await.unwrap().data; + + let state = self.chain.head().unwrap().beacon_state; + let expected = GenesisData { + genesis_time: state.genesis_time, + genesis_validators_root: state.genesis_validators_root, + genesis_fork_version: self.chain.spec.genesis_fork_version, + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_beacon_states_root(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_root(state_id) + .await + .unwrap() + .map(|res| res.data.root); + + let expected = match state_id { + StateId::Head => Some(self.chain.head_info().unwrap().state_root), + StateId::Genesis => Some(self.chain.genesis_state_root), + StateId::Finalized => { + let finalized_slot = self + .chain + .head_info() + .unwrap() + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + self.chain.state_root_at_slot(finalized_slot).unwrap() + } + StateId::Justified => { + let 
justified_slot = self + .chain + .head_info() + .unwrap() + .current_justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + self.chain.state_root_at_slot(justified_slot).unwrap() + } + StateId::Slot(slot) => self.chain.state_root_at_slot(slot).unwrap(), + StateId::Root(root) => Some(root), + }; + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_fork(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_state(state_id).map(|state| state.fork); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self + .get_state(state_id) + .map(|state| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint, + current_justified: state.current_justified_checkpoint, + finalized: state.finalized_checkpoint, + }); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_validators(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_validators(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_state(state_id).map(|state| { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = self.chain.spec.far_future_epoch; + + let mut validators = Vec::with_capacity(state.validators.len()); + + for i in 0..state.validators.len() { + let validator = state.validators[i].clone(); + + validators.push(ValidatorData { + index: i as u64, + balance: state.balances[i], + status: 
ValidatorStatus::from_validator( + Some(&validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator, + }) + } + + validators + }); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_validator_id(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_opt = self.get_state(state_id); + let validators = match state_opt.as_ref() { + Some(state) => state.validators.clone().into(), + None => vec![], + }; + + for (i, validator) in validators.into_iter().enumerate() { + let validator_ids = &[ + ValidatorId::PublicKey(validator.pubkey.clone()), + ValidatorId::Index(i as u64), + ]; + + for validator_id in validator_ids { + let result = self + .client + .get_beacon_states_validator_id(state_id, validator_id) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_ref().expect("result should be none"); + + let expected = { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = self.chain.spec.far_future_epoch; + + ValidatorData { + index: i as u64, + balance: state.balances[i], + status: ValidatorStatus::from_validator( + Some(&validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + } + }; + + assert_eq!(result, Some(expected), "{:?}, {:?}", state_id, validator_id); + } + } + } + + self + } + + pub async fn test_beacon_states_committees(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = self.get_state(state_id); + + let epoch = state_opt + .as_ref() + .map(|state| state.current_epoch()) + .unwrap_or_else(|| Epoch::new(0)); + + let results = self + .client + .get_beacon_states_committees(state_id, epoch, None, None) + .await + .unwrap() + .map(|res| res.data); + + if results.is_none() && state_opt.is_none() { + continue; + } + + let state = 
state_opt.as_mut().expect("result should be none"); + state.build_all_committee_caches(&self.chain.spec).unwrap(); + let committees = state + .get_beacon_committees_at_epoch( + RelativeEpoch::from_epoch(state.current_epoch(), epoch).unwrap(), + ) + .unwrap(); + + for (i, result) in results.unwrap().into_iter().enumerate() { + let expected = &committees[i]; + + assert_eq!(result.index, expected.index, "{}", state_id); + assert_eq!(result.slot, expected.slot, "{}", state_id); + assert_eq!( + result + .validators + .into_iter() + .map(|i| i as usize) + .collect::<Vec<_>>(), + expected.committee.to_vec(), + "{}", + state_id + ); + } + } + + self + } + + fn get_block_root(&self, block_id: BlockId) -> Option<Hash256> { + match block_id { + BlockId::Head => Some(self.chain.head_info().unwrap().block_root), + BlockId::Genesis => Some(self.chain.genesis_block_root), + BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root), + BlockId::Justified => Some( + self.chain + .head_info() + .unwrap() + .current_justified_checkpoint + .root, + ), + BlockId::Slot(slot) => self.chain.block_root_at_slot(slot).unwrap(), + BlockId::Root(root) => Some(root), + } + } + + fn get_block(&self, block_id: BlockId) -> Option<SignedBeaconBlock<E>> { + let root = self.get_block_root(block_id); + root.and_then(|root| self.chain.get_block(&root).unwrap()) + } + + pub async fn test_beacon_headers_all_slots(self) -> Self { + for slot in 0..CHAIN_LENGTH { + let slot = Slot::from(slot); + + let result = self + .client + .get_beacon_headers(Some(slot), None) + .await + .unwrap() + .map(|res| res.data); + + let root = self.chain.block_root_at_slot(slot).unwrap(); + + if root.is_none() && result.is_none() { + continue; + } + + let root = root.unwrap(); + let block = self.chain.block_at_slot(slot).unwrap().unwrap(); + let header = BlockHeaderData { + root, + canonical: true, + header: BlockHeaderAndSignature { + message: block.message.block_header(), + signature: 
block.signature.into(), + }, + }; + let expected = vec![header]; + + assert_eq!(result.unwrap(), expected, "slot {:?}", slot); + } + + self + } + + pub async fn test_beacon_headers_all_parents(self) -> Self { + let mut roots = self + .chain + .rev_iter_block_roots() + .unwrap() + .map(Result::unwrap) + .map(|(root, _slot)| root) + .collect::<Vec<_>>() + .into_iter() + .rev() + .collect::<Vec<_>>(); + + // The iterator natively returns duplicate roots for skipped slots. + roots.dedup(); + + for i in 1..roots.len() { + let parent_root = roots[i - 1]; + let child_root = roots[i]; + + let result = self + .client + .get_beacon_headers(None, Some(parent_root)) + .await + .unwrap() + .unwrap() + .data; + + assert_eq!(result.len(), 1, "i {}", i); + assert_eq!(result[0].root, child_root, "i {}", i); + } + + self + } + + pub async fn test_beacon_headers_block_id(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_headers_block_id(block_id) + .await + .unwrap() + .map(|res| res.data); + + let block_root_opt = self.get_block_root(block_id); + + let block_opt = block_root_opt.and_then(|root| self.chain.get_block(&root).unwrap()); + + if block_opt.is_none() && result.is_none() { + continue; + } + + let result = result.unwrap(); + let block = block_opt.unwrap(); + let block_root = block_root_opt.unwrap(); + let canonical = self + .chain + .block_root_at_slot(block.slot()) + .unwrap() + .map_or(false, |canonical| block_root == canonical); + + assert_eq!(result.canonical, canonical, "{:?}", block_id); + assert_eq!(result.root, block_root, "{:?}", block_id); + assert_eq!( + result.header.message, + block.message.block_header(), + "{:?}", + block_id + ); + assert_eq!( + result.header.signature, + block.signature.into(), + "{:?}", + block_id + ); + } + + self + } + + pub async fn test_beacon_blocks_root(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + 
.get_beacon_blocks_root(block_id) + .await + .unwrap() + .map(|res| res.data.root); + + let expected = self.get_block_root(block_id); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_post_beacon_blocks_valid(mut self) -> Self { + let next_block = &self.next_block; + + self.client.post_beacon_blocks(next_block).await.unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid blocks should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { + let mut next_block = self.next_block.clone(); + next_block.message.proposer_index += 1; + + assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); + + assert!( + self.network_rx.try_recv().is_ok(), + "invalid blocks should be sent to network" + ); + + self + } + + pub async fn test_beacon_blocks(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks(block_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_block(block_id); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_attestations(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks_attestations(block_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self + .get_block(block_id) + .map(|block| block.message.body.attestations.into()); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { + for attestation in &self.attestations { + self.client + .post_beacon_pool_attestations(attestation) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid attestation should be sent to network" + ); + } + + self + } + + pub async fn test_post_beacon_pool_attestations_invalid(mut self) -> Self { + for attestation in &self.attestations { 
+ let mut attestation = attestation.clone(); + attestation.data.slot += 1; + + assert!(self + .client + .post_beacon_pool_attestations(&attestation) + .await + .is_err()); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid attestation should not be sent to network" + ); + } + + self + } + + pub async fn test_get_beacon_pool_attestations(self) -> Self { + let result = self + .client + .get_beacon_pool_attestations() + .await + .unwrap() + .data; + + let mut expected = self.chain.op_pool.get_all_attestations(); + expected.extend(self.chain.naive_aggregation_pool.read().iter().cloned()); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_valid(mut self) -> Self { + self.client + .post_beacon_pool_attester_slashings(&self.attester_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid attester slashing should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_invalid(mut self) -> Self { + let mut slashing = self.attester_slashing.clone(); + slashing.attestation_1.data.slot += 1; + + self.client + .post_beacon_pool_attester_slashings(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid attester slashing should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_attester_slashings(self) -> Self { + let result = self + .client + .get_beacon_pool_attester_slashings() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_attester_slashings(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_proposer_slashings_valid(mut self) -> Self { + self.client + .post_beacon_pool_proposer_slashings(&self.proposer_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid proposer slashing should be sent to network" + ); + + self + } + + pub async fn 
test_post_beacon_pool_proposer_slashings_invalid(mut self) -> Self { + let mut slashing = self.proposer_slashing.clone(); + slashing.signed_header_1.message.slot += 1; + + self.client + .post_beacon_pool_proposer_slashings(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid proposer slashing should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_proposer_slashings(self) -> Self { + let result = self + .client + .get_beacon_pool_proposer_slashings() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_proposer_slashings(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_voluntary_exits_valid(mut self) -> Self { + self.client + .post_beacon_pool_voluntary_exits(&self.voluntary_exit) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid exit should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_voluntary_exits_invalid(mut self) -> Self { + let mut exit = self.voluntary_exit.clone(); + exit.message.epoch += 1; + + self.client + .post_beacon_pool_voluntary_exits(&exit) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid exit should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_voluntary_exits(self) -> Self { + let result = self + .client + .get_beacon_pool_voluntary_exits() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_voluntary_exits(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_config_fork_schedule(self) -> Self { + let result = self.client.get_config_fork_schedule().await.unwrap().data; + + let expected = vec![self.chain.head_info().unwrap().fork]; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_config_spec(self) -> Self { + let result = self.client.get_config_spec().await.unwrap().data; + + let expected = 
YamlConfig::from_spec::<E>(&self.chain.spec); + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_config_deposit_contract(self) -> Self { + let result = self + .client + .get_config_deposit_contract() + .await + .unwrap() + .data; + + let expected = DepositContractData { + address: self.chain.spec.deposit_contract_address, + chain_id: eth1::DEFAULT_NETWORK_ID.into(), + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_node_version(self) -> Self { + let result = self.client.get_node_version().await.unwrap().data; + + let expected = VersionData { + version: lighthouse_version::version_with_platform(), + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_node_syncing(self) -> Self { + let result = self.client.get_node_syncing().await.unwrap().data; + let head_slot = self.chain.head_info().unwrap().slot; + let sync_distance = self.chain.slot().unwrap() - head_slot; + + let expected = SyncingData { + is_syncing: false, + head_slot, + sync_distance, + }; + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_debug_beacon_states(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_debug_beacon_states(state_id) + .await + .unwrap() + .map(|res| res.data); + + let mut expected = self.get_state(state_id); + expected.as_mut().map(|state| state.drop_all_caches()); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_get_debug_beacon_heads(self) -> Self { + let result = self + .client + .get_debug_beacon_heads() + .await + .unwrap() + .data + .into_iter() + .map(|head| (head.root, head.slot)) + .collect::<Vec<_>>(); + + let expected = self.chain.heads(); + + assert_eq!(result, expected); + + self + } + + fn validator_count(&self) -> usize { + self.chain.head().unwrap().beacon_state.validators.len() + } + + fn interesting_validator_indices(&self) -> Vec<Vec<u64>> { + let validator_count = 
self.validator_count() as u64; + + let mut interesting = vec![ + vec![], + vec![0], + vec![0, 1], + vec![0, 1, 3], + vec![validator_count], + vec![validator_count, 1], + vec![validator_count, 1, 3], + vec![u64::max_value()], + vec![u64::max_value(), 1], + vec![u64::max_value(), 1, 3], + ]; + + interesting.push((0..validator_count).collect()); + + interesting + } + + pub async fn test_get_validator_duties_attester(self) -> Self { + let current_epoch = self.chain.epoch().unwrap().as_u64(); + + let half = current_epoch / 2; + let first = current_epoch - half; + let last = current_epoch + half; + + for epoch in first..=last { + for indices in self.interesting_validator_indices() { + let epoch = Epoch::from(epoch); + + // The endpoint does not allow getting duties past the next epoch. + if epoch > current_epoch + 1 { + assert_eq!( + self.client + .get_validator_duties_attester(epoch, Some(&indices)) + .await + .unwrap_err() + .status() + .map(Into::into), + Some(400) + ); + continue; + } + + let results = self + .client + .get_validator_duties_attester(epoch, Some(&indices)) + .await + .unwrap() + .data; + + let mut state = self + .chain + .state_at_slot( + epoch.start_slot(E::slots_per_epoch()), + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected_len = indices + .iter() + .filter(|i| **i < state.validators.len() as u64) + .count(); + + assert_eq!(results.len(), expected_len); + + for (indices_set, &i) in indices.iter().enumerate() { + if let Some(duty) = state + .get_attestation_duties(i as usize, RelativeEpoch::Current) + .unwrap() + { + let expected = AttesterData { + pubkey: state.validators[i as usize].pubkey.clone().into(), + validator_index: i, + committees_at_slot: duty.committees_at_slot, + committee_index: duty.index, + committee_length: duty.committee_len as u64, + validator_committee_index: duty.committee_position as u64, + slot: duty.slot, + }; + + let 
result = results + .iter() + .find(|duty| duty.validator_index == i) + .unwrap(); + + assert_eq!( + *result, expected, + "epoch: {}, indices_set: {}", + epoch, indices_set + ); + } else { + assert!( + !results.iter().any(|duty| duty.validator_index == i), + "validator index should not exist in response" + ); + } + } + } + } + + self + } + + pub async fn test_get_validator_duties_proposer(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + + let result = self + .client + .get_validator_duties_proposer(current_epoch) + .await + .unwrap() + .data; + + let mut state = self.chain.head_beacon_state().unwrap(); + + while state.current_epoch() < current_epoch { + per_slot_processing(&mut state, None, &self.chain.spec).unwrap(); + } + + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected = current_epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let index = state + .get_beacon_proposer_index(slot, &self.chain.spec) + .unwrap(); + let pubkey = state.validators[index].pubkey.clone().into(); + + ProposerData { pubkey, slot } + }) + .collect::<Vec<_>>(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_block_production(self) -> Self { + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + 
genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blocks::<E>(slot, randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client.post_beacon_blocks(&signed_block).await.unwrap(); + + assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + pub async fn test_get_validator_attestation_data(self) -> Self { + let mut state = self.chain.head_beacon_state().unwrap(); + let slot = state.slot; + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + for index in 0..state.get_committee_count_at_slot(slot).unwrap() { + let result = self + .client + .get_validator_attestation_data(slot, index) + .await + .unwrap() + .data; + + let expected = self + .chain + .produce_unaggregated_attestation(slot, index) + .unwrap() + .data; + + assert_eq!(result, expected); + } + + self + } + + pub async fn test_get_validator_aggregate_attestation(self) -> Self { + let attestation = self + .chain + .head_beacon_block() + .unwrap() + .message + .body + .attestations[0] + .clone(); + + let result = self + .client + .get_validator_aggregate_attestation( + attestation.data.slot, + attestation.data.tree_hash_root(), + ) + .await + .unwrap() + .unwrap() + .data; + + let expected = attestation; + + assert_eq!(result, expected); + + self + } + + pub async fn get_aggregate(&mut self) -> SignedAggregateAndProof<E> { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let mut head = self.chain.head().unwrap(); + while head.beacon_state.current_epoch() < epoch { + per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap(); + } + head.beacon_state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let 
committee_len = head.beacon_state.get_committee_count_at_slot(slot).unwrap(); + let fork = head.beacon_state.fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let mut duties = vec![]; + for i in 0..self.validator_keypairs.len() { + duties.push( + self.client + .get_validator_duties_attester(epoch, Some(&[i as u64])) + .await + .unwrap() + .data[0] + .clone(), + ) + } + + let (i, kp, duty, proof) = self + .validator_keypairs + .iter() + .enumerate() + .find_map(|(i, kp)| { + let duty = duties[i].clone(); + + let proof = SelectionProof::new::<E>( + duty.slot, + &kp.sk, + &fork, + genesis_validators_root, + &self.chain.spec, + ); + + if proof + .is_aggregator(committee_len as usize, &self.chain.spec) + .unwrap() + { + Some((i, kp, duty, proof)) + } else { + None + } + }) + .expect("there is at least one aggregator for this epoch") + .clone(); + + if duty.slot > slot { + self.chain.slot_clock.set_slot(duty.slot.into()); + } + + let attestation_data = self + .client + .get_validator_attestation_data(duty.slot, duty.committee_index) + .await + .unwrap() + .data; + + let mut attestation = Attestation { + aggregation_bits: BitList::with_capacity(duty.committee_length as usize).unwrap(), + data: attestation_data, + signature: AggregateSignature::infinity(), + }; + + attestation + .sign( + &kp.sk, + duty.validator_committee_index as usize, + &fork, + genesis_validators_root, + &self.chain.spec, + ) + .unwrap(); + + SignedAggregateAndProof::from_aggregate( + i as u64, + attestation, + Some(proof), + &kp.sk, + &fork, + genesis_validators_root, + &self.chain.spec, + ) + } + + pub async fn test_get_validator_aggregate_and_proofs_valid(mut self) -> Self { + let aggregate = self.get_aggregate().await; + + self.client + .post_validator_aggregate_and_proof::<E>(&aggregate) + .await + .unwrap(); + + assert!(self.network_rx.try_recv().is_ok()); + + self + } + + pub async fn test_get_validator_aggregate_and_proofs_invalid(mut self) -> Self { + let mut 
aggregate = self.get_aggregate().await; + + aggregate.message.aggregate.data.slot += 1; + + self.client + .post_validator_aggregate_and_proof::<E>(&aggregate) + .await + .unwrap_err(); + + assert!(self.network_rx.try_recv().is_err()); + + self + } + + pub async fn test_get_validator_beacon_committee_subscriptions(mut self) -> Self { + let subscription = BeaconCommitteeSubscription { + validator_index: 0, + committee_index: 0, + committees_at_slot: 1, + slot: Slot::new(1), + is_aggregator: true, + }; + + self.client + .post_validator_beacon_committee_subscriptions(&[subscription]) + .await + .unwrap(); + + self.network_rx.try_recv().unwrap(); + + self + } + + #[cfg(target_os = "linux")] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap(); + + self + } + + #[cfg(not(target_os = "linux"))] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap_err(); + + self + } + + pub async fn test_get_lighthouse_syncing(self) -> Self { + self.client.get_lighthouse_syncing().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_proto_array(self) -> Self { + self.client.get_lighthouse_proto_array().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_validator_inclusion_global(self) -> Self { + let epoch = self.chain.epoch().unwrap() - 1; + self.client + .get_lighthouse_validator_inclusion_global(epoch) + .await + .unwrap(); + + self + } + + pub async fn test_get_lighthouse_validator_inclusion(self) -> Self { + let epoch = self.chain.epoch().unwrap() - 1; + self.client + .get_lighthouse_validator_inclusion(epoch, ValidatorId::Index(0)) + .await + .unwrap(); + + self + } +} + +#[tokio::test(core_threads = 2)] +async fn beacon_genesis() { + ApiTester::new().test_beacon_genesis().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_root() { + ApiTester::new().test_beacon_states_root().await; +} + +#[tokio::test(core_threads = 2)] 
+async fn beacon_states_fork() { + ApiTester::new().test_beacon_states_fork().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_finality_checkpoints() { + ApiTester::new() + .test_beacon_states_finality_checkpoints() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_validators() { + ApiTester::new().test_beacon_states_validators().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_committees() { + ApiTester::new().test_beacon_states_committees().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_states_validator_id() { + ApiTester::new().test_beacon_states_validator_id().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_headers() { + ApiTester::new() + .test_beacon_headers_all_slots() + .await + .test_beacon_headers_all_parents() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_headers_block_id() { + ApiTester::new().test_beacon_headers_block_id().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks() { + ApiTester::new().test_beacon_blocks().await; +} + +#[tokio::test(core_threads = 2)] +async fn post_beacon_blocks_valid() { + ApiTester::new().test_post_beacon_blocks_valid().await; +} + +#[tokio::test(core_threads = 2)] +async fn post_beacon_blocks_invalid() { + ApiTester::new().test_post_beacon_blocks_invalid().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks_root() { + ApiTester::new().test_beacon_blocks_root().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks_attestations() { + ApiTester::new().test_beacon_blocks_attestations().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_get() { + ApiTester::new() + .test_get_beacon_pool_attestations() + .await + .test_get_beacon_pool_attester_slashings() + .await + .test_get_beacon_pool_proposer_slashings() + .await + .test_get_beacon_pool_voluntary_exits() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn 
beacon_pools_post_attestations_valid() { + ApiTester::new() + .test_post_beacon_pool_attestations_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attestations_invalid() { + ApiTester::new() + .test_post_beacon_pool_attestations_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attester_slashings_valid() { + ApiTester::new() + .test_post_beacon_pool_attester_slashings_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attester_slashings_invalid() { + ApiTester::new() + .test_post_beacon_pool_attester_slashings_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_proposer_slashings_valid() { + ApiTester::new() + .test_post_beacon_pool_proposer_slashings_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_proposer_slashings_invalid() { + ApiTester::new() + .test_post_beacon_pool_proposer_slashings_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_voluntary_exits_valid() { + ApiTester::new() + .test_post_beacon_pool_voluntary_exits_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_voluntary_exits_invalid() { + ApiTester::new() + .test_post_beacon_pool_voluntary_exits_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn config_get() { + ApiTester::new() + .test_get_config_fork_schedule() + .await + .test_get_config_spec() + .await + .test_get_config_deposit_contract() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn debug_get() { + ApiTester::new() + .test_get_debug_beacon_states() + .await + .test_get_debug_beacon_heads() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn node_get() { + ApiTester::new() + .test_get_node_version() + .await + .test_get_node_syncing() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_attester() { + 
ApiTester::new().test_get_validator_duties_attester().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_attester_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_attester() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_proposer() { + ApiTester::new().test_get_validator_duties_proposer().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_proposer_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_proposer() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn block_production() { + ApiTester::new().test_block_production().await; +} + +#[tokio::test(core_threads = 2)] +async fn block_production_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_block_production() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_attestation_data() { + ApiTester::new().test_get_validator_attestation_data().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_attestation_data_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_attestation_data() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_attestation() { + ApiTester::new() + .test_get_validator_aggregate_attestation() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_attestation_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_attestation() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid() { + ApiTester::new() + .test_get_validator_aggregate_and_proofs_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() { + ApiTester::new() + 
.skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_and_proofs_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_invalid() { + ApiTester::new() + .test_get_validator_aggregate_and_proofs_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_and_proofs_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_beacon_committee_subscriptions() { + ApiTester::new() + .test_get_validator_beacon_committee_subscriptions() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn lighthouse_endpoints() { + ApiTester::new() + .test_get_lighthouse_health() + .await + .test_get_lighthouse_syncing() + .await + .test_get_lighthouse_proto_array() + .await + .test_get_lighthouse_validator_inclusion() + .await + .test_get_lighthouse_validator_inclusion_global() + .await; +} diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml new file mode 100644 index 00000000000..482f7a5debc --- /dev/null +++ b/beacon_node/http_metrics/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "http_metrics" +version = "0.1.0" +authors = ["Paul Hauner <paul@paulhauner.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +prometheus = "0.9.0" +warp = "0.2.5" +serde = { version = "1.0.110", features = ["derive"] } +slog = "2.5.2" +beacon_chain = { path = "../beacon_chain" } +store = { path = "../store" } +eth2_libp2p = { path = "../eth2_libp2p" } +slot_clock = { path = "../../common/slot_clock" } +lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +lazy_static = "1.4.0" +eth2 = { path = "../../common/eth2" } +lighthouse_version = { path = "../../common/lighthouse_version" } +warp_utils = { path = 
"../../common/warp_utils" } + +[dev-dependencies] +tokio = { version = "0.2.21", features = ["sync"] } +reqwest = { version = "0.10.8", features = ["json"] } +environment = { path = "../../lighthouse/environment" } +types = { path = "../../consensus/types" } diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs new file mode 100644 index 00000000000..37eac82bda4 --- /dev/null +++ b/beacon_node/http_metrics/src/lib.rs @@ -0,0 +1,135 @@ +//! This crate provides a HTTP server that is solely dedicated to serving the `/metrics` endpoint. +//! +//! For other endpoints, see the `http_api` crate. + +#[macro_use] +extern crate lazy_static; + +mod metrics; + +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_version::version_with_platform; +use serde::{Deserialize, Serialize}; +use slog::{crit, info, Logger}; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::path::PathBuf; +use std::sync::Arc; +use warp::{http::Response, Filter}; + +#[derive(Debug)] +pub enum Error { + Warp(warp::Error), + Other(String), +} + +impl From<warp::Error> for Error { + fn from(e: warp::Error) -> Self { + Error::Warp(e) + } +} + +impl From<String> for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. +pub struct Context<T: BeaconChainTypes> { + pub config: Config, + pub chain: Option<Arc<BeaconChain<T>>>, + pub db_path: Option<PathBuf>, + pub freezer_db_path: Option<PathBuf>, + pub log: Logger, +} + +/// Configuration for the HTTP server. 
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + pub listen_addr: Ipv4Addr, + pub listen_port: u16, + pub allow_origin: Option<String>, +} + +impl Default for Config { + fn default() -> Self { + Self { + enabled: false, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 5054, + allow_origin: None, + } + } +} + +/// Creates a server that will serve requests using information from `ctx`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the provided address and then return a tuple of: +/// +/// - `SocketAddr`: the address that the HTTP server will listen on. +/// - `Future`: the actual server future that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. +pub fn serve<T: BeaconChainTypes>( + ctx: Arc<Context<T>>, + shutdown: impl Future<Output = ()> + Send + Sync + 'static, +) -> Result<(SocketAddr, impl Future<Output = ()>), Error> { + let config = &ctx.config; + let log = ctx.log.clone(); + let allow_origin = config.allow_origin.clone(); + + // Sanity check. + if !config.enabled { + crit!(log, "Cannot start disabled metrics HTTP server"); + return Err(Error::Other( + "A disabled metrics server should not be started".to_string(), + )); + } + + let inner_ctx = ctx.clone(); + let routes = warp::get() + .and(warp::path("metrics")) + .map(move || inner_ctx.clone()) + .and_then(|ctx: Arc<Context<T>>| async move { + Ok::<_, warp::Rejection>( + metrics::gather_prometheus_metrics(&ctx) + .map(|body| Response::builder().status(200).body(body).unwrap()) + .unwrap_or_else(|e| { + Response::builder() + .status(500) + .body(format!("Unable to gather metrics: {:?}", e)) + .unwrap() + }), + ) + }) + // Add a `Server` header. 
+ .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) + // Maybe add some CORS headers. + .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref())); + + let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )?; + + info!( + log, + "Metrics HTTP server started"; + "listen_address" => listening_socket.to_string(), + ); + + Ok((listening_socket, server)) +} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs similarity index 69% rename from beacon_node/rest_api/src/metrics.rs rename to beacon_node/http_metrics/src/metrics.rs index 4b1ba737d7a..bcd803c405e 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,38 +1,11 @@ -use crate::{ApiError, Context}; +use crate::Context; use beacon_chain::BeaconChainTypes; +use eth2::lighthouse::Health; use lighthouse_metrics::{Encoder, TextEncoder}; -use rest_types::Health; -use std::sync::Arc; pub use lighthouse_metrics::*; lazy_static! 
{ - pub static ref BEACON_HTTP_API_REQUESTS_TOTAL: Result<IntCounterVec> = - try_create_int_counter_vec( - "beacon_http_api_requests_total", - "Count of HTTP requests received", - &["endpoint"] - ); - pub static ref BEACON_HTTP_API_SUCCESS_TOTAL: Result<IntCounterVec> = - try_create_int_counter_vec( - "beacon_http_api_success_total", - "Count of HTTP requests that returned 200 OK", - &["endpoint"] - ); - pub static ref BEACON_HTTP_API_ERROR_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec( - "beacon_http_api_error_total", - "Count of HTTP that did not return 200 OK", - &["endpoint"] - ); - pub static ref BEACON_HTTP_API_TIMES_TOTAL: Result<HistogramVec> = try_create_histogram_vec( - "beacon_http_api_times_total", - "Duration to process HTTP requests", - &["endpoint"] - ); - pub static ref REQUEST_RESPONSE_TIME: Result<Histogram> = try_create_histogram( - "http_server_request_duration_seconds", - "Time taken to build a response to a HTTP request" - ); pub static ref PROCESS_NUM_THREADS: Result<IntGauge> = try_create_int_gauge( "process_num_threads", "Number of threads used by the current process" @@ -67,14 +40,9 @@ lazy_static! { try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes"); } -/// Returns the full set of Prometheus metrics for the Beacon Node application. -/// -/// # Note -/// -/// This is a HTTP handler method. -pub fn get_prometheus<T: BeaconChainTypes>( - ctx: Arc<Context<T>>, -) -> std::result::Result<String, ApiError> { +pub fn gather_prometheus_metrics<T: BeaconChainTypes>( + ctx: &Context<T>, +) -> std::result::Result<String, String> { let mut buffer = vec![]; let encoder = TextEncoder::new(); @@ -94,9 +62,17 @@ pub fn get_prometheus<T: BeaconChainTypes>( // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. 
- slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&ctx.beacon_chain.slot_clock); - store::scrape_for_metrics(&ctx.db_path, &ctx.freezer_db_path); - beacon_chain::scrape_for_metrics(&ctx.beacon_chain); + if let Some(beacon_chain) = ctx.chain.as_ref() { + slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&beacon_chain.slot_clock); + beacon_chain::scrape_for_metrics(beacon_chain); + } + + if let (Some(db_path), Some(freezer_db_path)) = + (ctx.db_path.as_ref(), ctx.freezer_db_path.as_ref()) + { + store::scrape_for_metrics(db_path, freezer_db_path); + } + eth2_libp2p::scrape_discovery_metrics(); // This will silently fail if we are unable to observe the health. This is desired behaviour @@ -125,6 +101,5 @@ pub fn get_prometheus<T: BeaconChainTypes>( .encode(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); - String::from_utf8(buffer) - .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) + String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs new file mode 100644 index 00000000000..18a40d4f849 --- /dev/null +++ b/beacon_node/http_metrics/tests/tests.rs @@ -0,0 +1,46 @@ +use beacon_chain::test_utils::BlockingMigratorEphemeralHarnessType; +use environment::null_logger; +use http_metrics::Config; +use reqwest::StatusCode; +use std::net::Ipv4Addr; +use std::sync::Arc; +use tokio::sync::oneshot; +use types::MainnetEthSpec; + +type Context = http_metrics::Context<BlockingMigratorEphemeralHarnessType<MainnetEthSpec>>; + +#[tokio::test(core_threads = 2)] +async fn returns_200_ok() { + let log = null_logger().unwrap(); + + let context = Arc::new(Context { + config: Config { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + }, + chain: None, + db_path: None, + freezer_db_path: None, + log, + }); + + let ctx = context.clone(); + let 
(_shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. + let _ = shutdown_rx.await; + }; + let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap(); + + tokio::spawn(async { server.await }); + + let url = format!( + "http://{}:{}/metrics", + listening_socket.ip(), + listening_socket.port() + ); + + assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); +} diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 0448e7762f8..ad856a6d253 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -17,7 +17,6 @@ beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } eth2_libp2p = { path = "../eth2_libp2p" } hashset_delay = { path = "../../common/hashset_delay" } -rest_types = { path = "../../common/rest_types" } types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slot_clock = { path = "../../common/slot_clock" } diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index 59f63890a29..7c017d295b9 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -15,9 +15,8 @@ use slog::{debug, error, o, trace, warn}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::SubnetDiscovery; use hashset_delay::HashSetDelay; -use rest_types::ValidatorSubscription; use slot_clock::SlotClock; -use types::{Attestation, EthSpec, Slot, SubnetId}; +use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; use crate::metrics; diff --git a/beacon_node/network/src/beacon_processor/worker.rs b/beacon_node/network/src/beacon_processor/worker.rs index 653922dfe5e..1abb2a27917 100644 --- a/beacon_node/network/src/beacon_processor/worker.rs +++ 
b/beacon_node/network/src/beacon_processor/worker.rs @@ -45,7 +45,7 @@ impl<T: BeaconChainTypes> Worker<T> { let attestation = match self .chain - .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) { Ok(attestation) => attestation, Err(e) => { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 1147562b4c7..1f9ddd6a0ef 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -15,13 +15,12 @@ use eth2_libp2p::{ }; use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; use futures::prelude::*; -use rest_types::ValidatorSubscription; use slog::{debug, error, info, o, trace, warn}; use std::{collections::HashMap, sync::Arc, time::Duration}; use store::HotColdDB; use tokio::sync::mpsc; use tokio::time::Delay; -use types::EthSpec; +use types::{EthSpec, ValidatorSubscription}; mod tests; diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 5b664c87726..6d6a8d1cdc1 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -332,6 +332,51 @@ impl<T: EthSpec> OperationPool<T> { pub fn num_voluntary_exits(&self) -> usize { self.voluntary_exits.read().len() } + + /// Returns all known `Attestation` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_attestations(&self) -> Vec<Attestation<T>> { + self.attestations + .read() + .iter() + .map(|(_, attns)| attns.iter().cloned()) + .flatten() + .collect() + } + + /// Returns all known `AttesterSlashing` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<T>> { + self.attester_slashings + .read() + .iter() + .map(|(slashing, _)| slashing.clone()) + .collect() + } + + /// Returns all known `ProposerSlashing` objects. 
+ /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_proposer_slashings(&self) -> Vec<ProposerSlashing> { + self.proposer_slashings + .read() + .iter() + .map(|(_, slashing)| slashing.clone()) + .collect() + } + + /// Returns all known `SignedVoluntaryExit` objects. + /// + /// This method may return objects that are invalid for block inclusion. + pub fn get_all_voluntary_exits(&self) -> Vec<SignedVoluntaryExit> { + self.voluntary_exits + .read() + .iter() + .map(|(_, exit)| exit.clone()) + .collect() + } } /// Filter up to a maximum number of operations out of an iterator. diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs deleted file mode 100644 index ad2688bb0ff..00000000000 --- a/beacon_node/rest_api/src/beacon.rs +++ /dev/null @@ -1,499 +0,0 @@ -use crate::helpers::*; -use crate::validator::get_state_for_epoch; -use crate::Context; -use crate::{ApiError, UrlQuery}; -use beacon_chain::{ - observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig, -}; -use futures::executor::block_on; -use hyper::body::Bytes; -use hyper::{Body, Request}; -use rest_types::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; -use std::io::Write; -use std::sync::Arc; - -use slog::error; -use types::{ - AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes, - RelativeEpoch, SignedBeaconBlockHash, Slot, -}; - -/// Returns a summary of the head of the beacon chain. 
-pub fn get_head<T: BeaconChainTypes>( - ctx: Arc<Context<T>>, -) -> Result<CanonicalHeadResponse, ApiError> { - let beacon_chain = &ctx.beacon_chain; - let chain_head = beacon_chain.head()?; - - Ok(CanonicalHeadResponse { - slot: chain_head.beacon_state.slot, - block_root: chain_head.beacon_block_root, - state_root: chain_head.beacon_state_root, - finalized_slot: chain_head - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root, - justified_slot: chain_head - .beacon_state - .current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root, - previous_justified_slot: chain_head - .beacon_state - .previous_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, - }) -} - -/// Return the list of heads of the beacon chain. -pub fn get_heads<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Vec<HeadBeaconBlock> { - ctx.beacon_chain - .heads() - .into_iter() - .map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock { - beacon_block_root, - beacon_block_slot, - }) - .collect() -} - -/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. -pub fn get_block<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<BlockResponse<T::EthSpec>, ApiError> { - let beacon_chain = &ctx.beacon_chain; - let query_params = ["root", "slot"]; - let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; - - let block_root = match (key.as_ref(), value) { - ("slot", value) => { - let target = parse_slot(&value)?; - - block_root_at_slot(beacon_chain, target)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for slot {:?}", - target - )) - })? 
- } - ("root", value) => parse_root(&value)?, - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), - }; - - let block = beacon_chain.store.get_block(&block_root)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for root {:?}", - block_root - )) - })?; - - Ok(BlockResponse { - root: block_root, - beacon_block: block, - }) -} - -/// HTTP handler to return a `SignedBeaconBlock` root at a given `slot`. -pub fn get_block_root<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Hash256, ApiError> { - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let target = parse_slot(&slot_string)?; - - block_root_at_slot(&ctx.beacon_chain, target)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for slot {:?}", - target - )) - }) -} - -fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result<Bytes> { - let mut buffer = Vec::new(); - { - let mut sse_message = uhttp_sse::SseMessage::new(&mut buffer); - let untyped_hash: Hash256 = new_head_hash.into(); - write!(sse_message.data()?, "{:?}", untyped_hash)?; - } - let bytes: Bytes = buffer.into(); - Ok(bytes) -} - -pub fn stream_forks<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<Body, ApiError> { - let mut events = ctx.events.lock().add_rx(); - let (mut sender, body) = Body::channel(); - std::thread::spawn(move || { - while let Ok(new_head_hash) = events.recv() { - let chunk = match make_sse_response_chunk(new_head_hash) { - Ok(chunk) => chunk, - Err(e) => { - error!(ctx.log, "Failed to make SSE chunk"; "error" => e.to_string()); - sender.abort(); - break; - } - }; - match block_on(sender.send_data(chunk)) { - Err(e) if e.is_closed() => break, - Err(e) => error!(ctx.log, "Couldn't stream piece {:?}", e), - Ok(_) => (), - } - } - }); - Ok(body) -} - -/// HTTP handler to which accepts a query string of a list of validator pubkeys and maps it to a -/// 
`ValidatorResponse`. -/// -/// This method is limited to as many `pubkeys` that can fit in a URL. See `post_validators` for -/// doing bulk requests. -pub fn get_validators<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorResponse>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let validator_pubkeys = query - .all_of("validator_pubkeys")? - .iter() - .map(|validator_pubkey_str| parse_pubkey_bytes(validator_pubkey_str)) - .collect::<Result<Vec<_>, _>>()?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - validator_responses_by_pubkey(&ctx.beacon_chain, state_root_opt, validator_pubkeys) -} - -/// HTTP handler to return all validators, each as a `ValidatorResponse`. -pub fn get_all_validators<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorResponse>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - - let validators = state.validators.clone(); - validators - .iter() - .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone())) - .collect::<Result<Vec<_>, _>>() -} - -/// HTTP handler to return all active validators, each as a `ValidatorResponse`. -pub fn get_active_validators<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorResponse>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) 
- } else { - None - }; - - let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - - let validators = state.validators.clone(); - let current_epoch = state.current_epoch(); - - validators - .iter() - .filter(|validator| validator.is_active_at(current_epoch)) - .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone())) - .collect::<Result<Vec<_>, _>>() -} - -/// HTTP handler to which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for -/// each of the given `pubkeys`. When `state_root` is `None`, the canonical head is used. -/// -/// This method allows for a basically unbounded list of `pubkeys`, where as the `get_validators` -/// request is limited by the max number of pubkeys you can fit in a URL. -pub fn post_validators<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorResponse>, ApiError> { - serde_json::from_slice::<ValidatorRequest>(&req.into_body()) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorRequest: {:?}", - e - )) - }) - .and_then(|bulk_request| { - validator_responses_by_pubkey( - &ctx.beacon_chain, - bulk_request.state_root, - bulk_request.pubkeys, - ) - }) -} - -/// Returns either the state given by `state_root_opt`, or the canonical head state if it is -/// `None`. -fn get_state_from_root_opt<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - state_root_opt: Option<Hash256>, -) -> Result<BeaconState<T::EthSpec>, ApiError> { - if let Some(state_root) = state_root_opt { - beacon_chain - .get_state(&state_root, None) - .map_err(|e| { - ApiError::ServerError(format!( - "Database error when reading state root {}: {:?}", - state_root, e - )) - })? 
- .ok_or_else(|| ApiError::NotFound(format!("No state exists with root: {}", state_root))) - } else { - Ok(beacon_chain.head()?.beacon_state) - } -} - -/// Maps a vec of `validator_pubkey` to a vec of `ValidatorResponse`, using the state at the given -/// `state_root`. If `state_root.is_none()`, uses the canonial head state. -fn validator_responses_by_pubkey<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - state_root_opt: Option<Hash256>, - validator_pubkeys: Vec<PublicKeyBytes>, -) -> Result<Vec<ValidatorResponse>, ApiError> { - let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?; - - validator_pubkeys - .into_iter() - .map(|validator_pubkey| validator_response_by_pubkey(&mut state, validator_pubkey)) - .collect::<Result<Vec<_>, ApiError>>() -} - -/// Maps a `validator_pubkey` to a `ValidatorResponse`, using the given state. -/// -/// The provided `state` must have a fully up-to-date pubkey cache. -fn validator_response_by_pubkey<E: EthSpec>( - state: &mut BeaconState<E>, - validator_pubkey: PublicKeyBytes, -) -> Result<ValidatorResponse, ApiError> { - let validator_index_opt = state - .get_validator_index(&validator_pubkey) - .map_err(|e| ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)))?; - - if let Some(validator_index) = validator_index_opt { - let balance = state.balances.get(validator_index).ok_or_else(|| { - ApiError::ServerError(format!("Invalid balances index: {:?}", validator_index)) - })?; - - let validator = state - .validators - .get(validator_index) - .ok_or_else(|| { - ApiError::ServerError(format!("Invalid validator index: {:?}", validator_index)) - })? 
- .clone(); - - Ok(ValidatorResponse { - pubkey: validator_pubkey, - validator_index: Some(validator_index), - balance: Some(*balance), - validator: Some(validator), - }) - } else { - Ok(ValidatorResponse { - pubkey: validator_pubkey, - validator_index: None, - balance: None, - validator: None, - }) - } -} - -/// HTTP handler -pub fn get_committees<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<Committee>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let mut state = - get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(|e| { - ApiError::ServerError(format!("Failed to get state suitable for epoch: {:?}", e)) - })?; - - state - .build_committee_cache(relative_epoch, &ctx.beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - - Ok(state - .get_beacon_committees_at_epoch(relative_epoch) - .map_err(|e| ApiError::ServerError(format!("Unable to get all committees: {:?}", e)))? - .into_iter() - .map(|c| Committee { - slot: c.slot, - index: c.index, - committee: c.committee.to_vec(), - }) - .collect::<Vec<_>>()) -} - -/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_state<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<StateResponse<T::EthSpec>, ApiError> { - let head_state = ctx.beacon_chain.head()?.beacon_state; - - let (key, value) = match UrlQuery::from_request(&req) { - Ok(query) => { - // We have *some* parameters, just check them. - let query_params = ["root", "slot"]; - query.first_of(&query_params)? 
- } - Err(ApiError::BadRequest(_)) => { - // No parameters provided at all, use current slot. - (String::from("slot"), head_state.slot.to_string()) - } - Err(e) => { - return Err(e); - } - }; - - let (root, state): (Hash256, BeaconState<T::EthSpec>) = match (key.as_ref(), value) { - ("slot", value) => state_at_slot(&ctx.beacon_chain, parse_slot(&value)?)?, - ("root", value) => { - let root = &parse_root(&value)?; - - let state = ctx - .beacon_chain - .store - .get_state(root, None)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; - - (*root, state) - } - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), - }; - - Ok(StateResponse { - root, - beacon_state: state, - }) -} - -/// HTTP handler to return a `BeaconState` root at a given `slot`. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_state_root<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Hash256, ApiError> { - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let slot = parse_slot(&slot_string)?; - - state_root_at_slot(&ctx.beacon_chain, slot, StateSkipConfig::WithStateRoots) -} - -/// HTTP handler to return a `BeaconState` at the genesis block. -/// -/// This is an undocumented convenience method used during testing. For production, simply do a -/// state request at slot 0. 
-pub fn get_genesis_state<T: BeaconChainTypes>( - ctx: Arc<Context<T>>, -) -> Result<BeaconState<T::EthSpec>, ApiError> { - state_at_slot(&ctx.beacon_chain, Slot::new(0)).map(|(_root, state)| state) -} - -pub fn proposer_slashing<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<bool, ApiError> { - let body = req.into_body(); - - serde_json::from_slice::<ProposerSlashing>(&body) - .map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e)) - .and_then(move |proposer_slashing| { - if ctx.beacon_chain.eth1_chain.is_some() { - let obs_outcome = ctx - .beacon_chain - .verify_proposer_slashing_for_gossip(proposer_slashing) - .map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?; - if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome { - ctx.beacon_chain - .import_proposer_slashing(verified_proposer_slashing); - Ok(()) - } else { - Err("Proposer slashing for that validator index already known".into()) - } - } else { - Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string()) - } - }) - .map_err(ApiError::BadRequest)?; - - Ok(true) -} - -pub fn attester_slashing<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<bool, ApiError> { - let body = req.into_body(); - serde_json::from_slice::<AttesterSlashing<T::EthSpec>>(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into AttesterSlashing: {:?}", - e - )) - }) - .and_then(move |attester_slashing| { - if ctx.beacon_chain.eth1_chain.is_some() { - ctx.beacon_chain - .verify_attester_slashing_for_gossip(attester_slashing) - .map_err(|e| format!("Error while verifying attester slashing: {:?}", e)) - .and_then(|outcome| { - if let ObservationOutcome::New(verified_attester_slashing) = outcome { - ctx.beacon_chain - .import_attester_slashing(verified_attester_slashing) - .map_err(|e| { - format!("Error while importing attester slashing: {:?}", e) - }) - } 
else { - Err("Attester slashing only covers already slashed indices".to_string()) - } - }) - .map_err(ApiError::BadRequest) - } else { - Err(ApiError::BadRequest( - "Cannot insert attester slashing on node without Eth1 connection.".to_string(), - )) - } - })?; - - Ok(true) -} diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs deleted file mode 100644 index 815fccfd01c..00000000000 --- a/beacon_node/rest_api/src/config.rs +++ /dev/null @@ -1,55 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::net::Ipv4Addr; - -/// Defines the encoding for the API. -#[derive(Clone, Serialize, Deserialize, Copy)] -pub enum ApiEncodingFormat { - JSON, - YAML, - SSZ, -} - -impl ApiEncodingFormat { - pub fn get_content_type(&self) -> &str { - match self { - ApiEncodingFormat::JSON => "application/json", - ApiEncodingFormat::YAML => "application/yaml", - ApiEncodingFormat::SSZ => "application/ssz", - } - } -} - -impl From<&str> for ApiEncodingFormat { - fn from(f: &str) -> ApiEncodingFormat { - match f { - "application/yaml" => ApiEncodingFormat::YAML, - "application/ssz" => ApiEncodingFormat::SSZ, - _ => ApiEncodingFormat::JSON, - } - } -} - -/// HTTP REST API Configuration -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - /// Enable the REST API server. - pub enabled: bool, - /// The IPv4 address the REST API HTTP server will listen on. - pub listen_address: Ipv4Addr, - /// The port the REST API HTTP server will listen on. - pub port: u16, - /// If something else than "", a 'Access-Control-Allow-Origin' header will be present in - /// responses. Put *, to allow any origin. 
- pub allow_origin: String, -} - -impl Default for Config { - fn default() -> Self { - Config { - enabled: false, - listen_address: Ipv4Addr::new(127, 0, 0, 1), - port: 5052, - allow_origin: "".to_string(), - } - } -} diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs deleted file mode 100644 index 9df57f05528..00000000000 --- a/beacon_node/rest_api/src/consensus.rs +++ /dev/null @@ -1,126 +0,0 @@ -use crate::helpers::*; -use crate::{ApiError, Context, UrlQuery}; -use beacon_chain::BeaconChainTypes; -use hyper::Request; -use rest_types::{IndividualVotesRequest, IndividualVotesResponse}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses}; -use std::sync::Arc; -use types::EthSpec; - -/// The results of validators voting during an epoch. -/// -/// Provides information about the current and previous epochs. -#[derive(Serialize, Deserialize, Encode, Decode)] -pub struct VoteCount { - /// The total effective balance of all active validators during the _current_ epoch. - pub current_epoch_active_gwei: u64, - /// The total effective balance of all active validators during the _previous_ epoch. - pub previous_epoch_active_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch. - pub current_epoch_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch and - /// agreed with the state about the beacon block at the first slot of the _current_ epoch. - pub current_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch. - pub previous_epoch_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch and - /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. 
- pub previous_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch and - /// agreed with the state about the beacon block at the time of attestation. - pub previous_epoch_head_attesting_gwei: u64, -} - -impl Into<VoteCount> for TotalBalances { - fn into(self) -> VoteCount { - VoteCount { - current_epoch_active_gwei: self.current_epoch(), - previous_epoch_active_gwei: self.previous_epoch(), - current_epoch_attesting_gwei: self.current_epoch_attesters(), - current_epoch_target_attesting_gwei: self.current_epoch_target_attesters(), - previous_epoch_attesting_gwei: self.previous_epoch_attesters(), - previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters(), - previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters(), - } - } -} - -/// HTTP handler return a `VoteCount` for some given `Epoch`. -pub fn get_vote_count<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<VoteCount, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - // This is the last slot of the given epoch (one prior to the first slot of the next epoch). 
- let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - - let (_root, state) = state_at_slot(&ctx.beacon_chain, target_slot)?; - let spec = &ctx.beacon_chain.spec; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - Ok(validator_statuses.total_balances.into()) -} - -pub fn post_individual_votes<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<IndividualVotesResponse>, ApiError> { - let body = req.into_body(); - - serde_json::from_slice::<IndividualVotesRequest>(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) - .and_then(move |body| { - let epoch = body.epoch; - - // This is the last slot of the given epoch (one prior to the first slot of the next epoch). - let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - - let (_root, mut state) = state_at_slot(&ctx.beacon_chain, target_slot)?; - let spec = &ctx.beacon_chain.spec; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - body.pubkeys - .into_iter() - .map(|pubkey| { - let validator_index_opt = state.get_validator_index(&pubkey).map_err(|e| { - ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)) - })?; - - if let Some(validator_index) = validator_index_opt { - let vote = validator_statuses - .statuses - .get(validator_index) - .cloned() - .map(Into::into); - - Ok(IndividualVotesResponse { - epoch, - pubkey, - validator_index: Some(validator_index), - vote, - }) - } else { - Ok(IndividualVotesResponse { - epoch, - pubkey, - validator_index: None, - vote: None, - }) - } - }) - .collect::<Result<Vec<_>, _>>() - }) -} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs deleted file mode 100644 index 66b5bd1a0c1..00000000000 --- 
a/beacon_node/rest_api/src/helpers.rs +++ /dev/null @@ -1,260 +0,0 @@ -use crate::{ApiError, NetworkChannel}; -use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use bls::PublicKeyBytes; -use eth2_libp2p::PubsubMessage; -use itertools::process_results; -use network::NetworkMessage; -use ssz::Decode; -use store::iter::AncestorIter; -use types::{ - BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot, -}; - -/// Parse a slot. -/// -/// E.g., `"1234"` -pub fn parse_slot(string: &str) -> Result<Slot, ApiError> { - string - .parse::<u64>() - .map(Slot::from) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e))) -} - -/// Parse an epoch. -/// -/// E.g., `"13"` -pub fn parse_epoch(string: &str) -> Result<Epoch, ApiError> { - string - .parse::<u64>() - .map(Epoch::from) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse epoch: {:?}", e))) -} - -/// Parse a CommitteeIndex. -/// -/// E.g., `"18"` -pub fn parse_committee_index(string: &str) -> Result<CommitteeIndex, ApiError> { - string - .parse::<CommitteeIndex>() - .map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e))) -} - -/// Parse an SSZ object from some hex-encoded bytes. -/// -/// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"` -pub fn parse_hex_ssz_bytes<T: Decode>(string: &str) -> Result<T, ApiError> { - const PREFIX: &str = "0x"; - - if string.starts_with(PREFIX) { - let trimmed = string.trim_start_matches(PREFIX); - let bytes = hex::decode(trimmed) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ hex: {:?}", e)))?; - T::from_ssz_bytes(&bytes) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ bytes: {:?}", e))) - } else { - Err(ApiError::BadRequest( - "Hex bytes must have a 0x prefix".to_string(), - )) - } -} - -/// Parse a root from a `0x` prefixed string. 
-/// -/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"` -pub fn parse_root(string: &str) -> Result<Hash256, ApiError> { - const PREFIX: &str = "0x"; - - if string.starts_with(PREFIX) { - let trimmed = string.trim_start_matches(PREFIX); - trimmed - .parse() - .map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e))) - } else { - Err(ApiError::BadRequest( - "Root must have a 0x prefix".to_string(), - )) - } -} - -/// Parse a PublicKey from a `0x` prefixed hex string -pub fn parse_pubkey_bytes(string: &str) -> Result<PublicKeyBytes, ApiError> { - const PREFIX: &str = "0x"; - if string.starts_with(PREFIX) { - let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX)) - .map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?; - let pubkey = PublicKeyBytes::deserialize(pubkey_bytes.as_slice()).map_err(|e| { - ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e)) - })?; - Ok(pubkey) - } else { - Err(ApiError::BadRequest( - "Public key must have a 0x prefix".to_string(), - )) - } -} - -/// Returns the root of the `SignedBeaconBlock` in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// May return a root for a previous slot, in the case of skip slots. -pub fn block_root_at_slot<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - target: Slot, -) -> Result<Option<Hash256>, ApiError> { - Ok(process_results( - beacon_chain.rev_iter_block_roots()?, - |iter| { - iter.take_while(|(_, slot)| *slot >= target) - .find(|(_, slot)| *slot == target) - .map(|(root, _)| root) - }, - )?) -} - -/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. 
-pub fn state_at_slot<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - slot: Slot, -) -> Result<(Hash256, BeaconState<T::EthSpec>), ApiError> { - let head = beacon_chain.head()?; - - if head.beacon_state.slot == slot { - Ok((head.beacon_state_root, head.beacon_state)) - } else { - let root = state_root_at_slot(beacon_chain, slot, StateSkipConfig::WithStateRoots)?; - - let state: BeaconState<T::EthSpec> = beacon_chain - .store - .get_state(&root, Some(slot))? - .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?; - - Ok((root, state)) - } -} - -/// Returns the root of the `BeaconState` in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// Will not return a state root if the request slot is in the future. Will return state roots -/// higher than the current head by skipping slots. -pub fn state_root_at_slot<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - slot: Slot, - config: StateSkipConfig, -) -> Result<Hash256, ApiError> { - let head_state = &beacon_chain.head()?.beacon_state; - let current_slot = beacon_chain - .slot() - .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; - - // There are four scenarios when obtaining a state for a given slot: - // - // 1. The request slot is in the future. - // 2. The request slot is the same as the best block (head) slot. - // 3. The request slot is prior to the head slot. - // 4. The request slot is later than the head slot. - if current_slot < slot { - // 1. The request slot is in the future. Reject the request. - // - // We could actually speculate about future state roots by skipping slots, however that's - // likely to cause confusion for API users. - Err(ApiError::BadRequest(format!( - "Requested slot {} is past the current slot {}", - slot, current_slot - ))) - } else if head_state.slot == slot { - // 2. The request slot is the same as the best block (head) slot. 
- // - // The head state root is stored in memory, return a reference. - Ok(beacon_chain.head()?.beacon_state_root) - } else if head_state.slot > slot { - // 3. The request slot is prior to the head slot. - // - // Iterate through the state roots on the head state to find the root for that - // slot. Once the root is found, load it from the database. - process_results( - head_state - .try_iter_ancestor_roots(beacon_chain.store.clone()) - .ok_or_else(|| { - ApiError::ServerError("Failed to create roots iterator".to_string()) - })?, - |mut iter| iter.find(|(_, s)| *s == slot).map(|(root, _)| root), - )? - .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at slot {}", slot))) - } else { - // 4. The request slot is later than the head slot. - // - // Use `per_slot_processing` to advance the head state to the present slot, - // assuming that all slots do not contain a block (i.e., they are skipped slots). - let mut state = beacon_chain.head()?.beacon_state; - let spec = &T::EthSpec::default_spec(); - - let skip_state_root = match config { - StateSkipConfig::WithStateRoots => None, - StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()), - }; - - for _ in state.slot.as_u64()..slot.as_u64() { - // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - state_processing::per_slot_processing(&mut state, skip_state_root, spec)?; - } - - // Note: this is an expensive operation. Once the tree hash cache is implement it may be - // used here. - Ok(state.canonical_root()) - } -} - -pub fn publish_beacon_block_to_network<T: BeaconChainTypes + 'static>( - chan: &NetworkChannel<T::EthSpec>, - block: SignedBeaconBlock<T::EthSpec>, -) -> Result<(), ApiError> { - // send the block via SSZ encoding - let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))]; - - // Publish the block to the p2p network via gossipsub. 
- if let Err(e) = chan.send(NetworkMessage::Publish { messages }) { - return Err(ApiError::ServerError(format!( - "Unable to send new block to network: {:?}", - e - ))); - } - - Ok(()) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn parse_root_works() { - assert_eq!( - parse_root("0x0000000000000000000000000000000000000000000000000000000000000000"), - Ok(Hash256::zero()) - ); - assert_eq!( - parse_root("0x000000000000000000000000000000000000000000000000000000000000002a"), - Ok(Hash256::from_low_u64_be(42)) - ); - assert!( - parse_root("0000000000000000000000000000000000000000000000000000000000000042").is_err() - ); - assert!(parse_root("0x").is_err()); - assert!(parse_root("0x00").is_err()); - } - - #[test] - fn parse_slot_works() { - assert_eq!(parse_slot("0"), Ok(Slot::new(0))); - assert_eq!(parse_slot("42"), Ok(Slot::new(42))); - assert_eq!(parse_slot("10000000"), Ok(Slot::new(10_000_000))); - assert!(parse_slot("cats").is_err()); - } -} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs deleted file mode 100644 index 405e08e217d..00000000000 --- a/beacon_node/rest_api/src/lib.rs +++ /dev/null @@ -1,127 +0,0 @@ -#[macro_use] -extern crate lazy_static; -mod router; -extern crate network as client_network; - -mod beacon; -pub mod config; -mod consensus; -mod helpers; -mod lighthouse; -mod metrics; -mod node; -mod url_query; -mod validator; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bus::Bus; -use client_network::NetworkMessage; -pub use config::ApiEncodingFormat; -use eth2_config::Eth2Config; -use eth2_libp2p::NetworkGlobals; -use futures::future::TryFutureExt; -use hyper::server::conn::AddrStream; -use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Request, Server}; -use parking_lot::Mutex; -use rest_types::ApiError; -use slog::{info, warn}; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::sync::Arc; -use tokio::sync::mpsc; -use types::SignedBeaconBlockHash; -use 
url_query::UrlQuery; - -pub use crate::helpers::parse_pubkey_bytes; -pub use config::Config; -pub use router::Context; - -pub type NetworkChannel<T> = mpsc::UnboundedSender<NetworkMessage<T>>; - -pub struct NetworkInfo<T: BeaconChainTypes> { - pub network_globals: Arc<NetworkGlobals<T::EthSpec>>, - pub network_chan: NetworkChannel<T::EthSpec>, -} - -// Allowing more than 7 arguments. -#[allow(clippy::too_many_arguments)] -pub fn start_server<T: BeaconChainTypes>( - executor: environment::TaskExecutor, - config: &Config, - beacon_chain: Arc<BeaconChain<T>>, - network_info: NetworkInfo<T>, - db_path: PathBuf, - freezer_db_path: PathBuf, - eth2_config: Eth2Config, - events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>, -) -> Result<SocketAddr, hyper::Error> { - let log = executor.log(); - let eth2_config = Arc::new(eth2_config); - - let context = Arc::new(Context { - executor: executor.clone(), - config: config.clone(), - beacon_chain, - network_globals: network_info.network_globals.clone(), - network_chan: network_info.network_chan, - eth2_config, - log: log.clone(), - db_path, - freezer_db_path, - events, - }); - - // Define the function that will build the request handler. - let make_service = make_service_fn(move |_socket: &AddrStream| { - let ctx = context.clone(); - - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| { - router::on_http_request(req, ctx.clone()) - })) - } - }); - - let bind_addr = (config.listen_address, config.port).into(); - let server = Server::bind(&bind_addr).serve(make_service); - - // Determine the address the server is actually listening on. - // - // This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free - // port). - let actual_listen_addr = server.local_addr(); - - // Build a channel to kill the HTTP server. 
- let exit = executor.exit(); - let inner_log = log.clone(); - let server_exit = async move { - let _ = exit.await; - info!(inner_log, "HTTP service shutdown"); - }; - - // Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered. - let inner_log = log.clone(); - let server_future = server - .with_graceful_shutdown(async { - server_exit.await; - }) - .map_err(move |e| { - warn!( - inner_log, - "HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e) - ) - }) - .unwrap_or_else(|_| ()); - - info!( - log, - "HTTP API started"; - "address" => format!("{}", actual_listen_addr.ip()), - "port" => actual_listen_addr.port(), - ); - - executor.spawn_without_exit(server_future, "http"); - - Ok(actual_listen_addr) -} diff --git a/beacon_node/rest_api/src/lighthouse.rs b/beacon_node/rest_api/src/lighthouse.rs deleted file mode 100644 index 4d0fae926df..00000000000 --- a/beacon_node/rest_api/src/lighthouse.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! This contains a collection of lighthouse specific HTTP endpoints. 
- -use crate::{ApiError, Context}; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::PeerInfo; -use serde::Serialize; -use std::sync::Arc; -use types::EthSpec; - -/// Returns all known peers and corresponding information -pub fn peers<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<Vec<Peer<T::EthSpec>>, ApiError> { - Ok(ctx - .network_globals - .peers - .read() - .peers() - .map(|(peer_id, peer_info)| Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect()) -} - -/// Returns all known connected peers and their corresponding information -pub fn connected_peers<T: BeaconChainTypes>( - ctx: Arc<Context<T>>, -) -> Result<Vec<Peer<T::EthSpec>>, ApiError> { - Ok(ctx - .network_globals - .peers - .read() - .connected_peers() - .map(|(peer_id, peer_info)| Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect()) -} - -/// Information returned by `peers` and `connected_peers`. -#[derive(Clone, Debug, Serialize)] -#[serde(bound = "T: EthSpec")] -pub struct Peer<T: EthSpec> { - /// The Peer's ID - peer_id: String, - /// The PeerInfo associated with the peer. - peer_info: PeerInfo<T>, -} diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs deleted file mode 100644 index bd5615de34d..00000000000 --- a/beacon_node/rest_api/src/node.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::{ApiError, Context}; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::types::SyncState; -use rest_types::{SyncingResponse, SyncingStatus}; -use std::sync::Arc; -use types::Slot; - -/// Returns a syncing status. -pub fn syncing<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<SyncingResponse, ApiError> { - let current_slot = ctx - .beacon_chain - .head_info() - .map_err(|e| ApiError::ServerError(format!("Unable to read head slot: {:?}", e)))? - .slot; - - let (starting_slot, highest_slot) = match ctx.network_globals.sync_state() { - SyncState::SyncingFinalized { - start_slot, - head_slot, - .. 
- } - | SyncState::SyncingHead { - start_slot, - head_slot, - } => (start_slot, head_slot), - SyncState::Synced | SyncState::Stalled => (Slot::from(0u64), current_slot), - }; - - let sync_status = SyncingStatus { - starting_slot, - current_slot, - highest_slot, - }; - - Ok(SyncingResponse { - is_syncing: ctx.network_globals.is_syncing(), - sync_status, - }) -} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs deleted file mode 100644 index bed7ba77aa6..00000000000 --- a/beacon_node/rest_api/src/router.rs +++ /dev/null @@ -1,322 +0,0 @@ -use crate::{ - beacon, config::Config, consensus, lighthouse, metrics, node, validator, NetworkChannel, -}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bus::Bus; -use environment::TaskExecutor; -use eth2_config::Eth2Config; -use eth2_libp2p::{NetworkGlobals, PeerId}; -use hyper::header::HeaderValue; -use hyper::{Body, Method, Request, Response}; -use lighthouse_version::version_with_platform; -use operation_pool::PersistedOperationPool; -use parking_lot::Mutex; -use rest_types::{ApiError, Handler, Health}; -use slog::debug; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Instant; -use types::{EthSpec, SignedBeaconBlockHash}; - -pub struct Context<T: BeaconChainTypes> { - pub executor: TaskExecutor, - pub config: Config, - pub beacon_chain: Arc<BeaconChain<T>>, - pub network_globals: Arc<NetworkGlobals<T::EthSpec>>, - pub network_chan: NetworkChannel<T::EthSpec>, - pub eth2_config: Arc<Eth2Config>, - pub log: slog::Logger, - pub db_path: PathBuf, - pub freezer_db_path: PathBuf, - pub events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>, -} - -pub async fn on_http_request<T: BeaconChainTypes>( - req: Request<Body>, - ctx: Arc<Context<T>>, -) -> Result<Response<Body>, ApiError> { - let path = req.uri().path().to_string(); - - let _timer = metrics::start_timer_vec(&metrics::BEACON_HTTP_API_TIMES_TOTAL, &[&path]); - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_REQUESTS_TOTAL, 
&[&path]); - - let received_instant = Instant::now(); - let log = ctx.log.clone(); - let allow_origin = ctx.config.allow_origin.clone(); - - match route(req, ctx).await { - Ok(mut response) => { - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_SUCCESS_TOTAL, &[&path]); - - if allow_origin != "" { - let headers = response.headers_mut(); - headers.insert( - hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, - HeaderValue::from_str(&allow_origin)?, - ); - headers.insert(hyper::header::VARY, HeaderValue::from_static("Origin")); - } - - debug!( - log, - "HTTP API request successful"; - "path" => path, - "duration_ms" => Instant::now().duration_since(received_instant).as_millis() - ); - Ok(response) - } - - Err(error) => { - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_ERROR_TOTAL, &[&path]); - - debug!( - log, - "HTTP API request failure"; - "path" => path, - "duration_ms" => Instant::now().duration_since(received_instant).as_millis() - ); - Ok(error.into()) - } - } -} - -async fn route<T: BeaconChainTypes>( - req: Request<Body>, - ctx: Arc<Context<T>>, -) -> Result<Response<Body>, ApiError> { - let path = req.uri().path().to_string(); - let ctx = ctx.clone(); - let method = req.method().clone(); - let executor = ctx.executor.clone(); - let handler = Handler::new(req, ctx, executor)?; - - match (method, path.as_ref()) { - (Method::GET, "/node/version") => handler - .static_value(version_with_platform()) - .await? - .serde_encodings(), - (Method::GET, "/node/health") => handler - .static_value(Health::observe().map_err(ApiError::ServerError)?) - .await? - .serde_encodings(), - (Method::GET, "/node/syncing") => handler - .allow_body() - .in_blocking_task(|_, ctx| node::syncing(ctx)) - .await? - .serde_encodings(), - (Method::GET, "/network/enr") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.local_enr().to_base64())) - .await? 
- .serde_encodings(), - (Method::GET, "/network/peer_count") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.connected_peers())) - .await? - .serde_encodings(), - (Method::GET, "/network/peer_id") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.local_peer_id().to_base58())) - .await? - .serde_encodings(), - (Method::GET, "/network/peers") => handler - .in_blocking_task(|_, ctx| { - Ok(ctx - .network_globals - .peers - .read() - .connected_peer_ids() - .map(PeerId::to_string) - .collect::<Vec<_>>()) - }) - .await? - .serde_encodings(), - (Method::GET, "/network/listen_port") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.listen_port_tcp())) - .await? - .serde_encodings(), - (Method::GET, "/network/listen_addresses") => handler - .in_blocking_task(|_, ctx| Ok(ctx.network_globals.listen_multiaddrs())) - .await? - .serde_encodings(), - (Method::GET, "/beacon/head") => handler - .in_blocking_task(|_, ctx| beacon::get_head(ctx)) - .await? - .all_encodings(), - (Method::GET, "/beacon/heads") => handler - .in_blocking_task(|_, ctx| Ok(beacon::get_heads(ctx))) - .await? - .all_encodings(), - (Method::GET, "/beacon/block") => handler - .in_blocking_task(beacon::get_block) - .await? - .all_encodings(), - (Method::GET, "/beacon/block_root") => handler - .in_blocking_task(beacon::get_block_root) - .await? - .all_encodings(), - (Method::GET, "/beacon/fork") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.fork)) - .await? - .all_encodings(), - (Method::GET, "/beacon/fork/stream") => { - handler.sse_stream(|_, ctx| beacon::stream_forks(ctx)).await - } - (Method::GET, "/beacon/genesis_time") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_time)) - .await? - .all_encodings(), - (Method::GET, "/beacon/genesis_validators_root") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_validators_root)) - .await? 
- .all_encodings(), - (Method::GET, "/beacon/validators") => handler - .in_blocking_task(beacon::get_validators) - .await? - .all_encodings(), - (Method::POST, "/beacon/validators") => handler - .allow_body() - .in_blocking_task(beacon::post_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators/all") => handler - .in_blocking_task(beacon::get_all_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators/active") => handler - .in_blocking_task(beacon::get_active_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/state") => handler - .in_blocking_task(beacon::get_state) - .await? - .all_encodings(), - (Method::GET, "/beacon/state_root") => handler - .in_blocking_task(beacon::get_state_root) - .await? - .all_encodings(), - (Method::GET, "/beacon/state/genesis") => handler - .in_blocking_task(|_, ctx| beacon::get_genesis_state(ctx)) - .await? - .all_encodings(), - (Method::GET, "/beacon/committees") => handler - .in_blocking_task(beacon::get_committees) - .await? - .all_encodings(), - (Method::POST, "/beacon/proposer_slashing") => handler - .allow_body() - .in_blocking_task(beacon::proposer_slashing) - .await? - .serde_encodings(), - (Method::POST, "/beacon/attester_slashing") => handler - .allow_body() - .in_blocking_task(beacon::attester_slashing) - .await? - .serde_encodings(), - (Method::POST, "/validator/duties") => handler - .allow_body() - .in_blocking_task(validator::post_validator_duties) - .await? - .serde_encodings(), - (Method::POST, "/validator/subscribe") => handler - .allow_body() - .in_blocking_task(validator::post_validator_subscriptions) - .await? - .serde_encodings(), - (Method::GET, "/validator/duties/all") => handler - .in_blocking_task(validator::get_all_validator_duties) - .await? - .serde_encodings(), - (Method::GET, "/validator/duties/active") => handler - .in_blocking_task(validator::get_active_validator_duties) - .await? 
- .serde_encodings(), - (Method::GET, "/validator/block") => handler - .in_blocking_task(validator::get_new_beacon_block) - .await? - .serde_encodings(), - (Method::POST, "/validator/block") => handler - .allow_body() - .in_blocking_task(validator::publish_beacon_block) - .await? - .serde_encodings(), - (Method::GET, "/validator/attestation") => handler - .in_blocking_task(validator::get_new_attestation) - .await? - .serde_encodings(), - (Method::GET, "/validator/aggregate_attestation") => handler - .in_blocking_task(validator::get_aggregate_attestation) - .await? - .serde_encodings(), - (Method::POST, "/validator/attestations") => handler - .allow_body() - .in_blocking_task(validator::publish_attestations) - .await? - .serde_encodings(), - (Method::POST, "/validator/aggregate_and_proofs") => handler - .allow_body() - .in_blocking_task(validator::publish_aggregate_and_proofs) - .await? - .serde_encodings(), - (Method::GET, "/consensus/global_votes") => handler - .allow_body() - .in_blocking_task(consensus::get_vote_count) - .await? - .serde_encodings(), - (Method::POST, "/consensus/individual_votes") => handler - .allow_body() - .in_blocking_task(consensus::post_individual_votes) - .await? - .serde_encodings(), - (Method::GET, "/spec") => handler - // TODO: this clone is not ideal. - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.spec.clone())) - .await? - .serde_encodings(), - (Method::GET, "/spec/slots_per_epoch") => handler - .static_value(T::EthSpec::slots_per_epoch()) - .await? - .serde_encodings(), - (Method::GET, "/spec/eth2_config") => handler - // TODO: this clone is not ideal. - .in_blocking_task(|_, ctx| Ok(ctx.eth2_config.as_ref().clone())) - .await? - .serde_encodings(), - (Method::GET, "/advanced/fork_choice") => handler - .in_blocking_task(|_, ctx| { - Ok(ctx - .beacon_chain - .fork_choice - .read() - .proto_array() - .core_proto_array() - .clone()) - }) - .await? 
- .serde_encodings(), - (Method::GET, "/advanced/operation_pool") => handler - .in_blocking_task(|_, ctx| { - Ok(PersistedOperationPool::from_operation_pool( - &ctx.beacon_chain.op_pool, - )) - }) - .await? - .serde_encodings(), - (Method::GET, "/metrics") => handler - .in_blocking_task(|_, ctx| metrics::get_prometheus(ctx)) - .await? - .text_encoding(), - (Method::GET, "/lighthouse/syncing") => handler - .in_blocking_task(|_, ctx| Ok(ctx.network_globals.sync_state())) - .await? - .serde_encodings(), - (Method::GET, "/lighthouse/peers") => handler - .in_blocking_task(|_, ctx| lighthouse::peers(ctx)) - .await? - .serde_encodings(), - (Method::GET, "/lighthouse/connected_peers") => handler - .in_blocking_task(|_, ctx| lighthouse::connected_peers(ctx)) - .await? - .serde_encodings(), - _ => Err(ApiError::NotFound( - "Request path and/or method not found.".to_owned(), - )), - } -} diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs deleted file mode 100644 index fee0cf437e6..00000000000 --- a/beacon_node/rest_api/src/url_query.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::helpers::{parse_committee_index, parse_epoch, parse_hex_ssz_bytes, parse_slot}; -use crate::ApiError; -use hyper::Request; -use types::{AttestationData, CommitteeIndex, Epoch, Signature, Slot}; - -/// Provides handy functions for parsing the query parameters of a URL. - -#[derive(Clone, Copy)] -pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>); - -impl<'a> UrlQuery<'a> { - /// Instantiate from an existing `Request`. - /// - /// Returns `Err` if `req` does not contain any query parameters. - pub fn from_request<T>(req: &'a Request<T>) -> Result<Self, ApiError> { - let query_str = req.uri().query().unwrap_or_else(|| ""); - - Ok(UrlQuery(url::form_urlencoded::parse(query_str.as_bytes()))) - } - - /// Returns the first `(key, value)` pair found where the `key` is in `keys`. - /// - /// If no match is found, an `InvalidQueryParams` error is returned. 
- pub fn first_of(mut self, keys: &[&str]) -> Result<(String, String), ApiError> { - self.0 - .find(|(key, _value)| keys.contains(&&**key)) - .map(|(key, value)| (key.into_owned(), value.into_owned())) - .ok_or_else(|| { - ApiError::BadRequest(format!( - "URL query must be valid and contain at least one of the following keys: {:?}", - keys - )) - }) - } - - /// Returns the first `(key, value)` pair found where the `key` is in `keys`, if any. - /// - /// Returns `None` if no match is found. - pub fn first_of_opt(mut self, keys: &[&str]) -> Option<(String, String)> { - self.0 - .find(|(key, _value)| keys.contains(&&**key)) - .map(|(key, value)| (key.into_owned(), value.into_owned())) - } - - /// Returns the value for `key`, if and only if `key` is the only key present in the query - /// parameters. - pub fn only_one(self, key: &str) -> Result<String, ApiError> { - let queries: Vec<_> = self - .0 - .map(|(k, v)| (k.into_owned(), v.into_owned())) - .collect(); - - if queries.len() == 1 { - let (first_key, first_value) = &queries[0]; // Must have 0 index if len is 1. - if first_key == key { - Ok(first_value.to_string()) - } else { - Err(ApiError::BadRequest(format!( - "Only the {} query parameter is supported", - key - ))) - } - } else { - Err(ApiError::BadRequest(format!( - "Only one query parameter is allowed, {} supplied", - queries.len() - ))) - } - } - - /// Returns a vector of all values present where `key` is in `keys - /// - /// If no match is found, an `InvalidQueryParams` error is returned. - pub fn all_of(self, key: &str) -> Result<Vec<String>, ApiError> { - let queries: Vec<_> = self - .0 - .filter_map(|(k, v)| { - if k.eq(key) { - Some(v.into_owned()) - } else { - None - } - }) - .collect(); - Ok(queries) - } - - /// Returns the value of the first occurrence of the `epoch` key. 
- pub fn epoch(self) -> Result<Epoch, ApiError> { - self.first_of(&["epoch"]) - .and_then(|(_key, value)| parse_epoch(&value)) - } - - /// Returns the value of the first occurrence of the `slot` key. - pub fn slot(self) -> Result<Slot, ApiError> { - self.first_of(&["slot"]) - .and_then(|(_key, value)| parse_slot(&value)) - } - - /// Returns the value of the first occurrence of the `committee_index` key. - pub fn committee_index(self) -> Result<CommitteeIndex, ApiError> { - self.first_of(&["committee_index"]) - .and_then(|(_key, value)| parse_committee_index(&value)) - } - - /// Returns the value of the first occurrence of the `randao_reveal` key. - pub fn randao_reveal(self) -> Result<Signature, ApiError> { - self.first_of(&["randao_reveal"]) - .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) - } - - /// Returns the value of the first occurrence of the `attestation_data` key. - pub fn attestation_data(self) -> Result<AttestationData, ApiError> { - self.first_of(&["attestation_data"]) - .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn only_one() { - let get_result = |addr: &str, key: &str| -> Result<String, ApiError> { - UrlQuery(url::Url::parse(addr).unwrap().query_pairs()).only_one(key) - }; - - assert_eq!(get_result("http://cat.io/?a=42", "a"), Ok("42".to_string())); - assert!(get_result("http://cat.io/?a=42", "b").is_err()); - assert!(get_result("http://cat.io/?a=42&b=12", "a").is_err()); - assert!(get_result("http://cat.io/", "").is_err()); - } - - #[test] - fn first_of() { - let url = url::Url::parse("http://lighthouse.io/cats?a=42&b=12&c=100").unwrap(); - let get_query = || UrlQuery(url.query_pairs()); - - assert_eq!( - get_query().first_of(&["a"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "b", "c"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "a", "a"]), - Ok(("a".to_string(), 
"42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "b", "c"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["b", "c"]), - Ok(("b".to_string(), "12".to_string())) - ); - assert_eq!( - get_query().first_of(&["c"]), - Ok(("c".to_string(), "100".to_string())) - ); - assert!(get_query().first_of(&["nothing"]).is_err()); - } -} diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs deleted file mode 100644 index 49342ddaa30..00000000000 --- a/beacon_node/rest_api/src/validator.rs +++ /dev/null @@ -1,747 +0,0 @@ -use crate::helpers::{parse_hex_ssz_bytes, publish_beacon_block_to_network}; -use crate::{ApiError, Context, NetworkChannel, UrlQuery}; -use beacon_chain::{ - attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - BlockError, ForkChoiceError, StateSkipConfig, -}; -use bls::PublicKeyBytes; -use eth2_libp2p::PubsubMessage; -use hyper::Request; -use network::NetworkMessage; -use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription}; -use slog::{error, info, trace, warn, Logger}; -use std::sync::Arc; -use types::beacon_state::EthSpec; -use types::{ - Attestation, AttestationData, BeaconBlock, BeaconState, Epoch, RelativeEpoch, SelectionProof, - SignedAggregateAndProof, SignedBeaconBlock, SubnetId, -}; - -/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This -/// method allows for collecting bulk sets of validator duties without risking exceeding the max -/// URL length with query pairs. 
-pub fn post_validator_duties<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorDutyBytes>, ApiError> { - let body = req.into_body(); - - serde_json::from_slice::<ValidatorDutiesRequest>(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) - .and_then(|bulk_request| { - return_validator_duties( - &ctx.beacon_chain.clone(), - bulk_request.epoch, - bulk_request.pubkeys.into_iter().map(Into::into).collect(), - ) - }) -} - -/// HTTP Handler to retrieve subscriptions for a set of validators. This allows the node to -/// organise peer discovery and topic subscription for known validators. -pub fn post_validator_subscriptions<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorSubscriptions: {:?}", - e - )) - }) - .and_then(move |subscriptions: Vec<ValidatorSubscription>| { - ctx.network_chan - .send(NetworkMessage::Subscribe { subscriptions }) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to subscriptions to the network: {:?}", - e - )) - })?; - Ok(()) - }) -} - -/// HTTP Handler to retrieve all validator duties for the given epoch. -pub fn get_all_validator_duties<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorDutyBytes>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let validator_pubkeys = state - .validators - .iter() - .map(|validator| validator.pubkey.clone()) - .collect(); - - return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys) -} - -/// HTTP Handler to retrieve all active validator duties for the given epoch. 
-pub fn get_active_validator_duties<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Vec<ValidatorDutyBytes>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let validator_pubkeys = state - .validators - .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())) - .map(|validator| validator.pubkey.clone()) - .collect(); - - return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys) -} - -/// Helper function to return the state that can be used to determine the duties for some `epoch`. -pub fn get_state_for_epoch<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - epoch: Epoch, - config: StateSkipConfig, -) -> Result<BeaconState<T::EthSpec>, ApiError> { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let head = beacon_chain.head()?; - let current_epoch = beacon_chain.epoch()?; - let head_epoch = head.beacon_state.current_epoch(); - - if head_epoch == current_epoch && RelativeEpoch::from_epoch(current_epoch, epoch).is_ok() { - Ok(head.beacon_state) - } else { - // If epoch is ahead of current epoch, then it should be a "next epoch" request for - // attestation duties. So, go to the start slot of the epoch prior to that, - // which should be just the next wall-clock epoch. - let slot = if epoch > current_epoch { - (epoch - 1).start_slot(slots_per_epoch) - } - // Otherwise, go to the start of the request epoch. - else { - epoch.start_slot(slots_per_epoch) - }; - - beacon_chain.state_at_slot(slot, config).map_err(|e| { - ApiError::ServerError(format!("Unable to load state for epoch {}: {:?}", epoch, e)) - }) - } -} - -/// Helper function to get the duties for some `validator_pubkeys` in some `epoch`. 
-fn return_validator_duties<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - epoch: Epoch, - validator_pubkeys: Vec<PublicKeyBytes>, -) -> Result<Vec<ValidatorDutyBytes>, ApiError> { - let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) - .map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?; - - state - .build_committee_cache(relative_epoch, &beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - - // Get a list of all validators for this epoch. - // - // Used for quickly determining the slot for a proposer. - let validator_proposers = if epoch == state.current_epoch() { - Some( - epoch - .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| { - state - .get_beacon_proposer_index(slot, &beacon_chain.spec) - .map(|i| (i, slot)) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to get proposer index for validator: {:?}", - e - )) - }) - }) - .collect::<Result<Vec<_>, _>>()?, - ) - } else { - None - }; - - validator_pubkeys - .into_iter() - .map(|validator_pubkey| { - // The `beacon_chain` can return a validator index that does not exist in all states. - // Therefore, we must check to ensure that the validator index is valid for our - // `state`. - let validator_index = beacon_chain - .validator_index(&validator_pubkey) - .map_err(|e| { - ApiError::ServerError(format!("Unable to get validator index: {:?}", e)) - })? 
- .filter(|i| *i < state.validators.len()); - - if let Some(validator_index) = validator_index { - let duties = state - .get_attestation_duties(validator_index, relative_epoch) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to obtain attestation duties: {:?}", - e - )) - })?; - - let committee_count_at_slot = duties - .map(|d| state.get_committee_count_at_slot(d.slot)) - .transpose() - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to find committee count at slot: {:?}", - e - )) - })?; - - let aggregator_modulo = duties - .map(|duties| SelectionProof::modulo(duties.committee_len, &beacon_chain.spec)) - .transpose() - .map_err(|e| { - ApiError::ServerError(format!("Unable to find modulo: {:?}", e)) - })?; - - let block_proposal_slots = validator_proposers.as_ref().map(|proposers| { - proposers - .iter() - .filter(|(i, _slot)| validator_index == *i) - .map(|(_i, slot)| *slot) - .collect() - }); - - Ok(ValidatorDutyBytes { - validator_pubkey, - validator_index: Some(validator_index as u64), - attestation_slot: duties.map(|d| d.slot), - attestation_committee_index: duties.map(|d| d.index), - committee_count_at_slot, - attestation_committee_position: duties.map(|d| d.committee_position), - block_proposal_slots, - aggregator_modulo, - }) - } else { - Ok(ValidatorDutyBytes { - validator_pubkey, - validator_index: None, - attestation_slot: None, - attestation_committee_index: None, - attestation_committee_position: None, - block_proposal_slots: None, - committee_count_at_slot: None, - aggregator_modulo: None, - }) - } - }) - .collect::<Result<Vec<_>, ApiError>>() -} - -/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. 
-pub fn get_new_beacon_block<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<BeaconBlock<T::EthSpec>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let slot = query.slot()?; - let randao_reveal = query.randao_reveal()?; - - let validator_graffiti = if let Some((_key, value)) = query.first_of_opt(&["graffiti"]) { - Some(parse_hex_ssz_bytes(&value)?) - } else { - None - }; - - let (new_block, _state) = ctx - .beacon_chain - .produce_block(randao_reveal, slot, validator_graffiti) - .map_err(|e| { - error!( - ctx.log, - "Error whilst producing block"; - "error" => format!("{:?}", e) - ); - - ApiError::ServerError(format!( - "Beacon node is not able to produce a block: {:?}", - e - )) - })?; - - Ok(new_block) -} - -/// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator. -pub fn publish_beacon_block<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body).map_err(|e| { - ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e)) - }) - .and_then(move |block: SignedBeaconBlock<T::EthSpec>| { - let slot = block.slot(); - match ctx.beacon_chain.process_block(block.clone()) { - Ok(block_root) => { - // Block was processed, publish via gossipsub - info!( - ctx.log, - "Block from local validator"; - "block_root" => format!("{}", block_root), - "block_slot" => slot, - ); - - publish_beacon_block_to_network::<T>(&ctx.network_chan, block)?; - - // Run the fork choice algorithm and enshrine a new canonical head, if - // found. - // - // The new head may or may not be the block we just received. - if let Err(e) = ctx.beacon_chain.fork_choice() { - error!( - ctx.log, - "Failed to find beacon chain head"; - "error" => format!("{:?}", e) - ); - } else { - // In the best case, validators should produce blocks that become the - // head. 
- // - // Potential reasons this may not be the case: - // - // - A quick re-org between block produce and publish. - // - Excessive time between block produce and publish. - // - A validator is using another beacon node to produce blocks and - // submitting them here. - if ctx.beacon_chain.head()?.beacon_block_root != block_root { - warn!( - ctx.log, - "Block from validator is not head"; - "desc" => "potential re-org", - ); - - } - } - - Ok(()) - } - Err(BlockError::BeaconChainError(e)) => { - error!( - ctx.log, - "Error whilst processing block"; - "error" => format!("{:?}", e) - ); - - Err(ApiError::ServerError(format!( - "Error while processing block: {:?}", - e - ))) - } - Err(other) => { - warn!( - ctx.log, - "Invalid block from local validator"; - "outcome" => format!("{:?}", other) - ); - - Err(ApiError::ProcessingError(format!( - "The SignedBeaconBlock could not be processed and has not been published: {:?}", - other - ))) - } - } - }) -} - -/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
-pub fn get_new_attestation<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Attestation<T::EthSpec>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let slot = query.slot()?; - let index = query.committee_index()?; - - ctx.beacon_chain - .produce_unaggregated_attestation(slot, index) - .map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e))) -} - -/// HTTP Handler to retrieve the aggregate attestation for a slot -pub fn get_aggregate_attestation<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<Attestation<T::EthSpec>, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let attestation_data = query.attestation_data()?; - - match ctx - .beacon_chain - .get_aggregated_attestation(&attestation_data) - { - Ok(Some(attestation)) => Ok(attestation), - Ok(None) => Err(ApiError::NotFound(format!( - "No matching aggregate attestation for slot {:?} is known in slot {:?}", - attestation_data.slot, - ctx.beacon_chain.slot() - ))), - Err(e) => Err(ApiError::ServerError(format!( - "Unable to obtain attestation: {:?}", - e - ))), - } -} - -/// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators. -pub fn publish_attestations<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<(), ApiError> { - let bytes = req.into_body(); - - serde_json::from_slice(&bytes) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to deserialize JSON into a list of attestations: {:?}", - e - )) - }) - // Process all of the aggregates _without_ exiting early if one fails. 
- .map( - move |attestations: Vec<(Attestation<T::EthSpec>, SubnetId)>| { - attestations - .into_iter() - .enumerate() - .map(|(i, (attestation, subnet_id))| { - process_unaggregated_attestation( - &ctx.beacon_chain, - ctx.network_chan.clone(), - attestation, - subnet_id, - i, - &ctx.log, - ) - }) - .collect::<Vec<Result<_, _>>>() - }, - ) - // Iterate through all the results and return on the first `Err`. - // - // Note: this will only provide info about the _first_ failure, not all failures. - .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) - .map(|_| ()) -} - -/// Processes an unaggregrated attestation that was included in a list of attestations with the -/// index `i`. -#[allow(clippy::redundant_clone)] // false positives in this function. -fn process_unaggregated_attestation<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - network_chan: NetworkChannel<T::EthSpec>, - attestation: Attestation<T::EthSpec>, - subnet_id: SubnetId, - i: usize, - log: &Logger, -) -> Result<(), ApiError> { - let data = &attestation.data.clone(); - - // Verify that the attestation is valid to included on the gossip network. 
- let verified_attestation = beacon_chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) - .map_err(|e| { - handle_attestation_error( - e, - &format!("unaggregated attestation {} failed gossip verification", i), - data, - log, - ) - })?; - - // Publish the attestation to the network - if let Err(e) = network_chan.send(NetworkMessage::Publish { - messages: vec![PubsubMessage::Attestation(Box::new(( - subnet_id, - attestation, - )))], - }) { - return Err(ApiError::ServerError(format!( - "Unable to send unaggregated attestation {} to network: {:?}", - i, e - ))); - } - - beacon_chain - .apply_attestation_to_fork_choice(&verified_attestation) - .map_err(|e| { - handle_fork_choice_error( - e, - &format!( - "unaggregated attestation {} was unable to be added to fork choice", - i - ), - data, - log, - ) - })?; - - beacon_chain - .add_to_naive_aggregation_pool(verified_attestation) - .map_err(|e| { - handle_attestation_error( - e, - &format!( - "unaggregated attestation {} was unable to be added to aggregation pool", - i - ), - data, - log, - ) - })?; - - Ok(()) -} - -/// HTTP Handler to publish an Attestation, which has been signed by a validator. -pub fn publish_aggregate_and_proofs<T: BeaconChainTypes>( - req: Request<Vec<u8>>, - ctx: Arc<Context<T>>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}", - e - )) - }) - // Process all of the aggregates _without_ exiting early if one fails. 
- .map( - move |signed_aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>| { - signed_aggregates - .into_iter() - .enumerate() - .map(|(i, signed_aggregate)| { - process_aggregated_attestation( - &ctx.beacon_chain, - ctx.network_chan.clone(), - signed_aggregate, - i, - &ctx.log, - ) - }) - .collect::<Vec<Result<_, _>>>() - }, - ) - // Iterate through all the results and return on the first `Err`. - // - // Note: this will only provide info about the _first_ failure, not all failures. - .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result)) -} - -/// Processes an aggregrated attestation that was included in a list of attestations with the index -/// `i`. -#[allow(clippy::redundant_clone)] // false positives in this function. -fn process_aggregated_attestation<T: BeaconChainTypes>( - beacon_chain: &BeaconChain<T>, - network_chan: NetworkChannel<T::EthSpec>, - signed_aggregate: SignedAggregateAndProof<T::EthSpec>, - i: usize, - log: &Logger, -) -> Result<(), ApiError> { - let data = &signed_aggregate.message.aggregate.data.clone(); - - // Verify that the attestation is valid to be included on the gossip network. - // - // Using this gossip check for local validators is not necessarily ideal, there will be some - // attestations that we reject that could possibly be included in a block (e.g., attestations - // that late by more than 1 epoch but less than 2). We can come pick this back up if we notice - // that it's materially affecting validator profits. Until then, I'm hesitant to introduce yet - // _another_ attestation verification path. 
- let verified_attestation = - match beacon_chain.verify_aggregated_attestation_for_gossip(signed_aggregate.clone()) { - Ok(verified_attestation) => verified_attestation, - Err(AttnError::AttestationAlreadyKnown(attestation_root)) => { - trace!( - log, - "Ignored known attn from local validator"; - "attn_root" => format!("{}", attestation_root) - ); - - // Exit early with success for a known attestation, there's no need to re-process - // an aggregate we already know. - return Ok(()); - } - /* - * It's worth noting that we don't check for `Error::AggregatorAlreadyKnown` since (at - * the time of writing) we check for `AttestationAlreadyKnown` first. - * - * Given this, it's impossible to hit `Error::AggregatorAlreadyKnown` without that - * aggregator having already produced a conflicting aggregation. This is not slashable - * but I think it's still the sort of condition we should error on, at least for now. - */ - Err(e) => { - return Err(handle_attestation_error( - e, - &format!("aggregated attestation {} failed gossip verification", i), - data, - log, - )) - } - }; - - // Publish the attestation to the network - if let Err(e) = network_chan.send(NetworkMessage::Publish { - messages: vec![PubsubMessage::AggregateAndProofAttestation(Box::new( - signed_aggregate, - ))], - }) { - return Err(ApiError::ServerError(format!( - "Unable to send aggregated attestation {} to network: {:?}", - i, e - ))); - } - - beacon_chain - .apply_attestation_to_fork_choice(&verified_attestation) - .map_err(|e| { - handle_fork_choice_error( - e, - &format!( - "aggregated attestation {} was unable to be added to fork choice", - i - ), - data, - log, - ) - })?; - - beacon_chain - .add_to_block_inclusion_pool(verified_attestation) - .map_err(|e| { - handle_attestation_error( - e, - &format!( - "aggregated attestation {} was unable to be added to op pool", - i - ), - data, - log, - ) - })?; - - Ok(()) -} - -/// Common handler for `AttnError` during attestation verification. 
-fn handle_attestation_error( - e: AttnError, - detail: &str, - data: &AttestationData, - log: &Logger, -) -> ApiError { - match e { - AttnError::BeaconChainError(e) => { - error!( - log, - "Internal error verifying local attestation"; - "detail" => detail, - "error" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ServerError(format!( - "Internal error verifying local attestation. Error: {:?}. Detail: {}", - e, detail - )) - } - e => { - error!( - log, - "Invalid local attestation"; - "detail" => detail, - "reason" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ProcessingError(format!( - "Invalid local attestation. Error: {:?} Detail: {}", - e, detail - )) - } - } -} - -/// Common handler for `ForkChoiceError` during attestation verification. -fn handle_fork_choice_error( - e: BeaconChainError, - detail: &str, - data: &AttestationData, - log: &Logger, -) -> ApiError { - match e { - BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => { - error!( - log, - "Local attestation invalid for fork choice"; - "detail" => detail, - "reason" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ProcessingError(format!( - "Invalid local attestation. Error: {:?} Detail: {}", - e, detail - )) - } - e => { - error!( - log, - "Internal error applying attn to fork choice"; - "detail" => detail, - "error" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ServerError(format!( - "Internal error verifying local attestation. Error: {:?}. 
Detail: {}", - e, detail - )) - } - } -} diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs deleted file mode 100644 index 160ee667ccf..00000000000 --- a/beacon_node/rest_api/tests/test.rs +++ /dev/null @@ -1,1345 +0,0 @@ -#![cfg(test)] - -#[macro_use] -extern crate assert_matches; - -use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use node_test_rig::{ - environment::{Environment, EnvironmentBuilder}, - testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, -}; -use remote_beacon_node::{ - Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorResponse, -}; -use rest_types::ValidatorDutyBytes; -use std::convert::TryInto; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{ - test_utils::{ - build_double_vote_attester_slashing, build_proposer_slashing, - generate_deterministic_keypair, AttesterSlashingTestTask, ProposerSlashingTestTask, - }, - BeaconBlock, BeaconState, ChainSpec, Domain, Epoch, EthSpec, MinimalEthSpec, PublicKey, - RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot, - SubnetId, Validator, -}; - -type E = MinimalEthSpec; - -fn build_env() -> Environment<E> { - EnvironmentBuilder::minimal() - .null_logger() - .expect("should build env logger") - .single_thread_tokio_runtime() - .expect("should start tokio runtime") - .build() - .expect("environment should build") -} - -fn build_node<E: EthSpec>(env: &mut Environment<E>, config: ClientConfig) -> LocalBeaconNode<E> { - let context = env.core_context(); - env.runtime() - .block_on(LocalBeaconNode::production(context, config)) - .expect("should block until node created") -} - -/// Returns the randao reveal for the given slot (assuming the given `beacon_chain` uses -/// deterministic keypairs). 
-fn get_randao_reveal<T: BeaconChainTypes>( - beacon_chain: Arc<BeaconChain<T>>, - slot: Slot, - spec: &ChainSpec, -) -> Signature { - let head = beacon_chain.head().expect("should get head"); - let fork = head.beacon_state.fork; - let genesis_validators_root = head.beacon_state.genesis_validators_root; - let proposer_index = beacon_chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = spec.get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); - let message = epoch.signing_root(domain); - keypair.sk.sign(message) -} - -/// Signs the given block (assuming the given `beacon_chain` uses deterministic keypairs). -fn sign_block<T: BeaconChainTypes>( - beacon_chain: Arc<BeaconChain<T>>, - block: BeaconBlock<T::EthSpec>, - spec: &ChainSpec, -) -> SignedBeaconBlock<T::EthSpec> { - let head = beacon_chain.head().expect("should get head"); - let fork = head.beacon_state.fork; - let genesis_validators_root = head.beacon_state.genesis_validators_root; - let proposer_index = beacon_chain - .block_proposer(block.slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - block.sign(&keypair.sk, &fork, genesis_validators_root, spec) -} - -#[test] -fn validator_produce_attestation() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - let genesis_validators_root = beacon_chain.genesis_validators_root; - let state = beacon_chain.head().expect("should get head").beacon_state; - - // Find a validator that has duties in the current slot of the chain. 
- let mut validator_index = 0; - let duties = loop { - let duties = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - if duties.slot == node.client.beacon_chain().unwrap().slot().unwrap() { - break duties; - } else { - validator_index += 1 - } - }; - - let mut attestation = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_attestation(duties.slot, duties.index), - ) - .expect("should fetch attestation from http api"); - - assert_eq!( - attestation.data.index, duties.index, - "should have same index" - ); - assert_eq!(attestation.data.slot, duties.slot, "should have same slot"); - assert_eq!( - attestation.aggregation_bits.num_set_bits(), - 0, - "should have empty aggregation bits" - ); - - let keypair = generate_deterministic_keypair(validator_index); - - // Fetch the duties again, but via HTTP for authenticity. - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties( - attestation.data.slot.epoch(E::slots_per_epoch()), - &[keypair.pk.clone()], - )) - .expect("should fetch duties from http api"); - let duties = &duties[0]; - let committee_count = duties - .committee_count_at_slot - .expect("should have committee count"); - let subnet_id = SubnetId::compute_subnet::<E>( - attestation.data.slot, - attestation.data.index, - committee_count, - spec, - ) - .unwrap(); - // Try publishing the attestation without a signature or a committee bit set, ensure it is - // raises an error. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish unsigned attestation"); - assert!( - !publish_status.is_valid(), - "the unsigned published attestation should be invalid" - ); - - // Set the aggregation bit. 
- attestation - .aggregation_bits - .set( - duties - .attestation_committee_position - .expect("should have committee position"), - true, - ) - .expect("should set attestation bit"); - - // Try publishing with an aggreagation bit set, but an invalid signature. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish attestation with invalid signature"); - assert!( - !publish_status.is_valid(), - "the unsigned published attestation should not be valid" - ); - - // Un-set the aggregation bit, so signing doesn't error. - attestation - .aggregation_bits - .set( - duties - .attestation_committee_position - .expect("should have committee position"), - false, - ) - .expect("should un-set attestation bit"); - - attestation - .sign( - &keypair.sk, - duties - .attestation_committee_position - .expect("should have committee position"), - &state.fork, - state.genesis_validators_root, - spec, - ) - .expect("should sign attestation"); - - // Try publishing the valid attestation. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish attestation"); - assert!( - publish_status.is_valid(), - "the signed published attestation should be valid" - ); - - // Try obtaining an aggregated attestation with a matching attestation data to the previous - // one. - let aggregated_attestation = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_aggregate_attestation(&attestation.data), - ) - .expect("should fetch aggregated attestation from http api"); - - let signed_aggregate_and_proof = SignedAggregateAndProof::from_aggregate( - validator_index as u64, - aggregated_attestation, - None, - &keypair.sk, - &state.fork, - genesis_validators_root, - spec, - ); - - // Publish the signed aggregate. 
- let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_aggregate_and_proof(vec![signed_aggregate_and_proof]), - ) - .expect("should publish aggregate and proof"); - assert!( - publish_status.is_valid(), - "the signed aggregate and proof should be valid" - ); -} - -#[test] -fn validator_duties() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let mut epoch = Epoch::new(0); - - let validators = beacon_chain - .head() - .expect("should get head") - .beacon_state - .validators - .iter() - .map(|v| (&v.pubkey).try_into().expect("pubkey should be valid")) - .collect::<Vec<_>>(); - - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties(epoch, &validators)) - .expect("should fetch duties from http api"); - - // 1. Check at the current epoch. - check_duties( - duties, - epoch, - validators.clone(), - beacon_chain.clone(), - spec, - ); - - epoch += 4; - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties(epoch, &validators)) - .expect("should fetch duties from http api"); - - // 2. Check with a long skip forward. - check_duties(duties, epoch, validators, beacon_chain, spec); - - // TODO: test an epoch in the past. Blocked because the `LocalBeaconNode` cannot produce a - // chain, yet. -} - -fn check_duties<T: BeaconChainTypes>( - duties: Vec<ValidatorDutyBytes>, - epoch: Epoch, - validators: Vec<PublicKey>, - beacon_chain: Arc<BeaconChain<T>>, - spec: &ChainSpec, -) { - assert_eq!( - validators.len(), - duties.len(), - "there should be a duty for each validator" - ); - - // Are the duties from the current epoch of the beacon chain, and thus are proposer indices - // known? 
- let proposers_known = epoch == beacon_chain.epoch().unwrap(); - - let mut state = beacon_chain - .state_at_slot( - epoch.start_slot(T::EthSpec::slots_per_epoch()), - StateSkipConfig::WithStateRoots, - ) - .expect("should get state at slot"); - - state.build_all_caches(spec).expect("should build caches"); - - validators - .iter() - .zip(duties.iter()) - .for_each(|(validator, duty)| { - assert_eq!( - *validator, - (&duty.validator_pubkey) - .try_into() - .expect("should be valid pubkey"), - "pubkey should match" - ); - - let validator_index = state - .get_validator_index(&validator.clone().into()) - .expect("should have pubkey cache") - .expect("pubkey should exist"); - - let attestation_duty = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - assert_eq!( - Some(attestation_duty.slot), - duty.attestation_slot, - "attestation slot should match" - ); - - assert_eq!( - Some(attestation_duty.index), - duty.attestation_committee_index, - "attestation index should match" - ); - - if proposers_known { - let block_proposal_slots = duty.block_proposal_slots.as_ref().unwrap(); - - if !block_proposal_slots.is_empty() { - for slot in block_proposal_slots { - let expected_proposer = state - .get_beacon_proposer_index(*slot, spec) - .expect("should know proposer"); - assert_eq!( - expected_proposer, validator_index, - "should get correct proposal slot" - ); - } - } else { - epoch.slot_iter(E::slots_per_epoch()).for_each(|slot| { - let slot_proposer = state - .get_beacon_proposer_index(slot, spec) - .expect("should know proposer"); - assert_ne!( - slot_proposer, validator_index, - "validator should not have proposal slot in this epoch" - ) - }) - } - } else { - assert_eq!(duty.block_proposal_slots, None); - } - }); - - if proposers_known { - // Validator duties should include a proposer for every slot of the epoch. 
- let mut all_proposer_slots: Vec<Slot> = duties - .iter() - .flat_map(|duty| duty.block_proposal_slots.clone().unwrap()) - .collect(); - all_proposer_slots.sort(); - - let all_slots: Vec<Slot> = epoch.slot_iter(E::slots_per_epoch()).collect(); - assert_eq!(all_proposer_slots, all_slots); - } -} - -#[test] -fn validator_block_post() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let two_slots_secs = (spec.milliseconds_per_slot / 1_000) * 2; - - let mut config = testing_client_config(); - config.genesis = ClientGenesis::Interop { - validator_count: 8, - genesis_time: SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs() - - two_slots_secs, - }; - - let node = build_node(&mut env, config); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec); - - let block = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_block(slot, randao_reveal, None), - ) - .expect("should fetch block from http api"); - - // Try publishing the block without a signature, ensure it is flagged as invalid. 
- let empty_sig_block = SignedBeaconBlock { - message: block.clone(), - signature: Signature::empty(), - }; - let publish_status = env - .runtime() - .block_on(remote_node.http.validator().publish_block(empty_sig_block)) - .expect("should publish block"); - if cfg!(not(feature = "fake_crypto")) { - assert!( - !publish_status.is_valid(), - "the unsigned published block should not be valid" - ); - } - - let signed_block = sign_block(beacon_chain.clone(), block, spec); - let block_root = signed_block.canonical_root(); - - let publish_status = env - .runtime() - .block_on(remote_node.http.validator().publish_block(signed_block)) - .expect("should publish block"); - - if cfg!(not(feature = "fake_crypto")) { - assert_eq!( - publish_status, - PublishStatus::Valid, - "the signed published block should be valid" - ); - } - - let head = env - .runtime() - .block_on(remote_node.http.beacon().get_head()) - .expect("should get head"); - - assert_eq!( - head.block_root, block_root, - "the published block should become the head block" - ); - - // Note: this heads check is not super useful for this test, however it is include so it get - // _some_ testing. If you remove this call, make sure it's tested somewhere else. 
- let heads = env - .runtime() - .block_on(remote_node.http.beacon().get_heads()) - .expect("should get heads"); - - assert_eq!(heads.len(), 1, "there should be only one head"); - assert_eq!( - heads, - vec![HeadBeaconBlock { - beacon_block_root: head.block_root, - beacon_block_slot: head.slot, - }], - "there should be only one head" - ); -} - -#[test] -fn validator_block_get() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); - - let block = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_block(slot, randao_reveal.clone(), None), - ) - .expect("should fetch block from http api"); - - let (expected_block, _state) = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .produce_block(randao_reveal, slot, None) - .expect("should produce block"); - - assert_eq!( - block, expected_block, - "the block returned from the API should be as expected" - ); -} - -#[test] -fn validator_block_get_with_graffiti() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); - - let block = env - .runtime() - .block_on(remote_node.http.validator().produce_block( - slot, - randao_reveal.clone(), - Some(*b"test-graffiti-test-graffiti-test"), - )) - .expect("should fetch block from http api"); - - let (expected_block, _state) = node - .client - 
.beacon_chain() - .expect("client should have beacon chain") - .produce_block( - randao_reveal, - slot, - Some(*b"test-graffiti-test-graffiti-test"), - ) - .expect("should produce block"); - - assert_eq!( - block, expected_block, - "the block returned from the API should be as expected" - ); -} - -#[test] -fn beacon_state() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let (state_by_slot, root) = env - .runtime() - .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0))) - .expect("should fetch state from http api"); - - let (state_by_root, root_2) = env - .runtime() - .block_on(remote_node.http.beacon().get_state_by_root(root)) - .expect("should fetch state from http api"); - - let mut db_state = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots) - .expect("should find state"); - db_state.drop_all_caches(); - - assert_eq!( - root, root_2, - "the two roots returned from the api should be identical" - ); - assert_eq!( - root, - db_state.canonical_root(), - "root from database should match that from the API" - ); - assert_eq!( - state_by_slot, db_state, - "genesis state by slot from api should match that from the DB" - ); - assert_eq!( - state_by_root, db_state, - "genesis state by root from api should match that from the DB" - ); -} - -#[test] -fn beacon_block() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let (block_by_slot, root) = env - .runtime() - .block_on(remote_node.http.beacon().get_block_by_slot(Slot::new(0))) - .expect("should fetch block from http api"); - - let (block_by_root, root_2) = env - .runtime() - .block_on(remote_node.http.beacon().get_block_by_root(root)) - .expect("should fetch block from 
http api"); - - let db_block = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .block_at_slot(Slot::new(0)) - .expect("should find block") - .expect("block should not be none"); - - assert_eq!( - root, root_2, - "the two roots returned from the api should be identical" - ); - assert_eq!( - root, - db_block.canonical_root(), - "root from database should match that from the API" - ); - assert_eq!( - block_by_slot, db_block, - "genesis block by slot from api should match that from the DB" - ); - assert_eq!( - block_by_root, db_block, - "genesis block by root from api should match that from the DB" - ); -} - -#[test] -fn genesis_time() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let genesis_time = env - .runtime() - .block_on(remote_node.http.beacon().get_genesis_time()) - .expect("should fetch genesis time from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .genesis_time, - genesis_time, - "should match genesis time from head state" - ); -} - -#[test] -fn genesis_validators_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let genesis_validators_root = env - .runtime() - .block_on(remote_node.http.beacon().get_genesis_validators_root()) - .expect("should fetch genesis time from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .genesis_validators_root, - genesis_validators_root, - "should match genesis time from head state" - ); -} - -#[test] -fn fork() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = 
node.remote_node().expect("should produce remote node"); - - let fork = env - .runtime() - .block_on(remote_node.http.beacon().get_fork()) - .expect("should fetch from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .fork, - fork, - "should match head state" - ); -} - -#[test] -fn eth2_config() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let eth2_config = env - .runtime() - .block_on(remote_node.http.spec().get_eth2_config()) - .expect("should fetch eth2 config from http api"); - - // TODO: check the entire eth2_config, not just the spec. - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .spec, - eth2_config.spec, - "should match genesis time from head state" - ); -} - -#[test] -fn get_version() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let version = env - .runtime() - .block_on(remote_node.http.node().get_version()) - .expect("should fetch version from http api"); - - assert_eq!( - lighthouse_version::version_with_platform(), - version, - "result should be as expected" - ); -} - -#[test] -fn get_genesis_state_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let slot = Slot::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_state_root(slot)) - .expect("should fetch from http api"); - - let expected = node - .client - .beacon_chain() - .expect("should have beacon chain") - .rev_iter_state_roots() - .expect("should get iter") - .map(Result::unwrap) - .find(|(_cur_root, cur_slot)| slot == *cur_slot) - 
.map(|(cur_root, _)| cur_root) - .expect("chain should have state root at slot"); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_genesis_block_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let slot = Slot::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_block_root(slot)) - .expect("should fetch from http api"); - - let expected = node - .client - .beacon_chain() - .expect("should have beacon chain") - .rev_iter_block_roots() - .expect("should get iter") - .map(Result::unwrap) - .find(|(_cur_root, cur_slot)| slot == *cur_slot) - .map(|(cur_root, _)| cur_root) - .expect("chain should have state root at slot"); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let validators = state.validators.iter().take(2).collect::<Vec<_>>(); - let pubkeys = validators - .iter() - .map(|v| (&v.pubkey).try_into().expect("should decode pubkey bytes")) - .collect(); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_validators(pubkeys, None)) - .expect("should fetch from http api"); - - result - .iter() - .zip(validators.iter()) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_all_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - 
.expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_all_validators(None)) - .expect("should fetch from http api"); - - result - .iter() - .zip(state.validators.iter()) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_active_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_active_validators(None)) - .expect("should fetch from http api"); - - /* - * This test isn't comprehensive because all of the validators in the state are active (i.e., - * there is no one to exclude. - * - * This should be fixed once we can generate more interesting scenarios with the - * `NodeTestRig`. 
- */ - - let validators = state - .validators - .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())); - - result - .iter() - .zip(validators) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_committees() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let epoch = Epoch::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_committees(epoch)) - .expect("should fetch from http api"); - - let expected = chain - .head() - .expect("should get head") - .beacon_state - .get_beacon_committees_at_epoch(RelativeEpoch::Current) - .expect("should get committees") - .iter() - .map(|c| Committee { - slot: c.slot, - index: c.index, - committee: c.committee.to_vec(), - }) - .collect::<Vec<_>>(); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_fork_choice() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let fork_choice = env - .runtime() - .block_on(remote_node.http.advanced().get_fork_choice()) - .expect("should not error when getting fork choice"); - - assert_eq!( - fork_choice, - *node - .client - .beacon_chain() - .expect("node should have beacon chain") - .fork_choice - .read() - .proto_array() - .core_proto_array(), - "result should be as expected" - ); -} - -#[test] -fn get_operation_pool() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let result = env - .runtime() - .block_on(remote_node.http.advanced().get_operation_pool()) - .expect("should not error 
when getting fork choice"); - - let expected = PersistedOperationPool::from_operation_pool( - &node - .client - .beacon_chain() - .expect("node should have chain") - .op_pool, - ); - - assert_eq!(result, expected, "result should be as expected"); -} - -fn compare_validator_response<T: EthSpec>( - state: &BeaconState<T>, - response: &ValidatorResponse, - validator: &Validator, -) { - let response_validator = response.validator.clone().expect("should have validator"); - let i = response - .validator_index - .expect("should have validator index"); - let balance = response.balance.expect("should have balance"); - - assert_eq!(response.pubkey, validator.pubkey, "pubkey"); - assert_eq!(response_validator, *validator, "validator"); - assert_eq!(state.balances[i], balance, "balances"); - assert_eq!(state.validators[i], *validator, "validator index"); -} - -#[test] -fn proposer_slashing() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let state = chain - .head() - .expect("should have retrieved state") - .beacon_state; - - let spec = &chain.spec; - - // Check that there are no proposer slashings before insertion - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 0); - - let slot = state.slot; - let proposer_index = chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - let key = &keypair.sk; - let fork = &state.fork; - let proposer_slashing = build_proposer_slashing::<E>( - ProposerSlashingTestTask::Valid, - proposer_index as u64, - &key, - fork, - state.genesis_validators_root, - spec, - ); - - let result = env - .runtime() - .block_on( - remote_node - .http - .beacon() - 
.proposer_slashing(proposer_slashing.clone()), - ) - .expect("should fetch from http api"); - assert!(result, true); - - // Length should be just one as we've inserted only one proposer slashing - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 1); - assert_eq!(proposer_slashing.clone(), proposer_slashings[0]); - - let mut invalid_proposer_slashing = build_proposer_slashing::<E>( - ProposerSlashingTestTask::Valid, - proposer_index as u64, - &key, - fork, - state.genesis_validators_root, - spec, - ); - invalid_proposer_slashing.signed_header_2 = invalid_proposer_slashing.signed_header_1.clone(); - - let result = env.runtime().block_on( - remote_node - .http - .beacon() - .proposer_slashing(invalid_proposer_slashing), - ); - assert!(result.is_err()); - - // Length should still be one as we've inserted nothing since last time. - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 1); - assert_eq!(proposer_slashing, proposer_slashings[0]); -} - -#[test] -fn attester_slashing() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let state = chain - .head() - .expect("should have retrieved state") - .beacon_state; - let slot = state.slot; - let spec = &chain.spec; - - let proposer_index = chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - - let secret_keys = vec![&keypair.sk]; - let validator_indices = vec![proposer_index as u64]; - let fork = &state.fork; - - // Checking there are no attester slashings before insertion - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - 
assert_eq!(attester_slashings.len(), 0); - - let attester_slashing = build_double_vote_attester_slashing( - AttesterSlashingTestTask::Valid, - &validator_indices[..], - &secret_keys[..], - fork, - state.genesis_validators_root, - spec, - ); - - let result = env - .runtime() - .block_on( - remote_node - .http - .beacon() - .attester_slashing(attester_slashing.clone()), - ) - .expect("should fetch from http api"); - assert!(result, true); - - // Length should be just one as we've inserted only one attester slashing - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 1); - assert_eq!(attester_slashing, attester_slashings[0]); - - // Building an invalid attester slashing - let mut invalid_attester_slashing = build_double_vote_attester_slashing( - AttesterSlashingTestTask::Valid, - &validator_indices[..], - &secret_keys[..], - fork, - state.genesis_validators_root, - spec, - ); - invalid_attester_slashing.attestation_2 = invalid_attester_slashing.attestation_1.clone(); - - let result = env.runtime().block_on( - remote_node - .http - .beacon() - .attester_slashing(invalid_attester_slashing), - ); - result.unwrap_err(); - - // Length should still be one as we've failed to insert the attester slashing. 
- let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 1); - assert_eq!(attester_slashing, attester_slashings[0]); -} - -mod validator_attestation { - use super::*; - use http::StatusCode; - use node_test_rig::environment::Environment; - use remote_beacon_node::{Error::DidNotSucceed, HttpClient}; - use types::{Attestation, AttestationDuty, MinimalEthSpec}; - use url::Url; - - fn setup() -> ( - Environment<MinimalEthSpec>, - LocalBeaconNode<MinimalEthSpec>, - HttpClient<MinimalEthSpec>, - Url, - AttestationDuty, - ) { - let mut env = build_env(); - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let client = remote_node.http.clone(); - let socket_addr = node - .client - .http_listen_addr() - .expect("A remote beacon node must have a http server"); - let url = Url::parse(&format!( - "http://{}:{}/validator/attestation", - socket_addr.ip(), - socket_addr.port() - )) - .expect("should be valid endpoint"); - - // Find a validator that has duties in the current slot of the chain. 
- let mut validator_index = 0; - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - let state = beacon_chain.head().expect("should get head").beacon_state; - let duties = loop { - let duties = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - if duties.slot == node.client.beacon_chain().unwrap().slot().unwrap() { - break duties; - } else { - validator_index += 1 - } - }; - - (env, node, client, url, duties) - } - - #[test] - fn requires_query_parameters() { - let (mut env, _node, client, url, _duties) = setup(); - - let attestation = env.runtime().block_on( - // query parameters are missing - client.json_get::<Attestation<MinimalEthSpec>>(url.clone(), vec![]), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"slot\"]".to_owned()); - } - ); - } - - #[test] - fn requires_slot() { - let (mut env, _node, client, url, duties) = setup(); - - let attestation = env.runtime().block_on( - // `slot` is missing - client.json_get::<Attestation<MinimalEthSpec>>( - url.clone(), - vec![("committee_index".into(), format!("{}", duties.index))], - ), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"slot\"]".to_owned()); - } - ); - } - - #[test] - fn requires_committee_index() { - let (mut env, _node, client, url, duties) = setup(); - - let attestation = env.runtime().block_on( - // `committee_index` is missing. 
- client.json_get::<Attestation<MinimalEthSpec>>( - url.clone(), - vec![("slot".into(), format!("{}", duties.slot))], - ), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"committee_index\"]".to_owned()); - } - ); - } -} - -#[cfg(target_os = "linux")] -#[test] -fn get_health() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - env.runtime() - .block_on(remote_node.http.node().get_health()) - .unwrap(); -} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 9f6ee79b15a..fd838e03384 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -142,7 +142,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http") .long("http") - .help("Enable RESTful HTTP API server. Disabled by default.") + .help("Enable the RESTful HTTP API server. Disabled by default.") .takes_value(false), ) .arg( @@ -169,6 +169,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("") .takes_value(true), ) + /* Prometheus metrics HTTP server related arguments */ + .arg( + Arg::with_name("metrics") + .long("metrics") + .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") + .takes_value(false), + ) + .arg( + Arg::with_name("metrics-address") + .long("metrics-address") + .value_name("ADDRESS") + .help("Set the listen address for the Prometheus metrics HTTP server.") + .default_value("127.0.0.1") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-port") + .long("metrics-port") + .value_name("PORT") + .help("Set the listen TCP port for the Prometheus metrics HTTP server.") + .default_value("5054") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-allow-origin") + .long("metrics-allow-origin") + .value_name("ORIGIN") + .help("Set the value of the Access-Control-Allow-Origin response HTTP header for the Prometheus metrics HTTP server. \ + Use * to allow any origin (not recommended in production)") + .default_value("") + .takes_value(true), + ) /* Websocket related arguments */ .arg( Arg::with_name("ws") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index aabdbb35ca4..ba2dbe21a59 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -87,26 +87,26 @@ pub fn get_config<E: EthSpec>( */ if cli_args.is_present("staking") { - client_config.rest_api.enabled = true; + client_config.http_api.enabled = true; client_config.sync_eth1_chain = true; } /* - * Http server + * Http API server */ if cli_args.is_present("http") { - client_config.rest_api.enabled = true; + client_config.http_api.enabled = true; } if let Some(address) = cli_args.value_of("http-address") { - client_config.rest_api.listen_address = address + client_config.http_api.listen_addr = address .parse::<Ipv4Addr>() .map_err(|_| "http-address is not a valid IPv4 address.")?; } if let Some(port) = cli_args.value_of("http-port") { - client_config.rest_api.port = port + client_config.http_api.listen_port = port .parse::<u16>() .map_err(|_| "http-port is not a valid u16.")?; } @@ -117,7 +117,36 @@ pub fn get_config<E: EthSpec>( hyper::header::HeaderValue::from_str(allow_origin) .map_err(|_| "Invalid 
allow-origin value")?; - client_config.rest_api.allow_origin = allow_origin.to_string(); + client_config.http_api.allow_origin = Some(allow_origin.to_string()); + } + + /* + * Prometheus metrics HTTP server + */ + + if cli_args.is_present("metrics") { + client_config.http_metrics.enabled = true; + } + + if let Some(address) = cli_args.value_of("metrics-address") { + client_config.http_metrics.listen_addr = address + .parse::<Ipv4Addr>() + .map_err(|_| "metrics-address is not a valid IPv4 address.")?; + } + + if let Some(port) = cli_args.value_of("metrics-port") { + client_config.http_metrics.listen_port = port + .parse::<u16>() + .map_err(|_| "metrics-port is not a valid u16.")?; + } + + if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + // Pre-validate the config value to give feedback to the user on node startup, instead of + // as late as when the first API response is produced. + hyper::header::HeaderValue::from_str(allow_origin) + .map_err(|_| "Invalid allow-origin value")?; + + client_config.http_metrics.allow_origin = Some(allow_origin.to_string()); } // Log a warning indicating an open HTTP server if it wasn't specified explicitly @@ -125,7 +154,7 @@ pub fn get_config<E: EthSpec>( if cli_args.is_present("staking") { warn!( log, - "Running HTTP server on port {}", client_config.rest_api.port + "Running HTTP server on port {}", client_config.http_api.listen_port ); } @@ -219,7 +248,8 @@ pub fn get_config<E: EthSpec>( unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; client_config.network.discovery_port = unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?; - client_config.rest_api.port = 0; + client_config.http_api.listen_port = 0; + client_config.http_metrics.listen_port = 0; client_config.websocket_server.port = 0; } @@ -230,6 +260,11 @@ pub fn get_config<E: EthSpec>( client_config.eth1.deposit_contract_address = format!("{:?}", 
eth2_testnet_config.deposit_contract_address()?); + let spec_contract_address = format!("{:?}", spec.deposit_contract_address); + if client_config.eth1.deposit_contract_address != spec_contract_address { + return Err("Testnet contract address does not match spec".into()); + } + client_config.eth1.deposit_contract_deploy_block = eth2_testnet_config.deposit_contract_deploy_block; client_config.eth1.lowest_cached_block_number = @@ -265,7 +300,7 @@ pub fn get_config<E: EthSpec>( }; let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN); - client_config.graffiti[..trimmed_graffiti_len] + client_config.graffiti.0[..trimmed_graffiti_len] .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]); if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index a09f8c6cd32..feff1e3206f 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -71,7 +71,6 @@ impl<E: EthSpec> ProductionBeaconNode<E> { context: RuntimeContext<E>, mut client_config: ClientConfig, ) -> Result<Self, String> { - let http_eth2_config = context.eth2_config().clone(); let spec = context.eth2_config().spec.clone(); let client_config_1 = client_config.clone(); let client_genesis = client_config.genesis.clone(); @@ -118,26 +117,22 @@ impl<E: EthSpec> ProductionBeaconNode<E> { builder.no_eth1_backend()? }; - let (builder, events) = builder + let (builder, _events) = builder .system_time_slot_clock()? .tee_event_handler(client_config.websocket_server.clone())?; // Inject the executor into the discv5 network config. client_config.network.discv5_config.executor = Some(Box::new(executor)); - let builder = builder + builder .build_beacon_chain()? .network(&client_config.network) .await? - .notifier()?; - - let builder = if client_config.rest_api.enabled { - builder.http_server(&client_config, &http_eth2_config, events)? - } else { - builder - }; - - Ok(Self(builder.build())) + .notifier()? 
+ .http_api_config(client_config.http_api.clone()) + .http_metrics_config(client_config.http_metrics.clone()) + .build() + .map(Self) } pub fn into_inner(self) -> ProductionClient<E> { diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index a845acf04df..7d860538f9b 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -3,6 +3,7 @@ use beacon_chain::StateSkipConfig; use node_test_rig::{ environment::{Environment, EnvironmentBuilder}, + eth2::types::StateId, testing_client_config, LocalBeaconNode, }; use types::{EthSpec, MinimalEthSpec, Slot}; @@ -34,10 +35,12 @@ fn http_server_genesis_state() { let node = build_node(&mut env); let remote_node = node.remote_node().expect("should produce remote node"); - let (api_state, _root) = env + let api_state = env .runtime() - .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0))) - .expect("should fetch state from http api"); + .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0)))) + .expect("should fetch state from http api") + .unwrap() + .data; let mut db_state = node .client diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 18e0ccad246..b570357b9db 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -14,20 +14,15 @@ * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) * [Importing from the Eth2 Launchpad](./validator-import-launchpad.md) -* [Local Testnets](./local-testnets.md) -* [API](./api.md) - * [HTTP (RESTful JSON)](./http.md) - * [/node](./http/node.md) - * [/beacon](./http/beacon.md) - * [/validator](./http/validator.md) - * [/consensus](./http/consensus.md) - * [/network](./http/network.md) - * [/spec](./http/spec.md) - * [/advanced](./http/advanced.md) - * [/lighthouse](./http/lighthouse.md) - * [WebSocket](./websockets.md) +* [APIs](./api.md) + * [Beacon Node API](./api-bn.md) + * [/lighthouse](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator 
Client API](./api-vc.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Advanced Usage](./advanced.md) * [Database Configuration](./advanced_database.md) + * [Local Testnets](./local-testnets.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md new file mode 100644 index 00000000000..6c901862ee0 --- /dev/null +++ b/book/src/advanced_metrics.md @@ -0,0 +1,34 @@ +# Prometheus Metrics + +Lighthouse provides an extensive suite of metrics and monitoring in the +[Prometheus](https://prometheus.io/docs/introduction/overview/) export format +via a HTTP server built into Lighthouse. + +These metrics are generally consumed by a Prometheus server and displayed via a +Grafana dashboard. These components are available in a docker-compose format at +[sigp/lighthouse-metrics](https://github.com/sigp/lighthouse-metrics). + +## Beacon Node Metrics + +By default, these metrics are disabled but can be enabled with the `--metrics` +flag. Use the `--metrics-address`, `--metrics-port` and +`--metrics-allow-origin` flags to customize the metrics server. + +### Example + +Start a beacon node with the metrics server enabled: + +```bash +lighthouse bn --metrics +``` + +Check to ensure that the metrics are available on the default port: + +```bash +curl localhost:5054/metrics +``` + +## Validator Client Metrics + +The validator client does not *yet* expose metrics, however this functionality +is expected to be implemented in late-September 2020. diff --git a/book/src/api-bn.md b/book/src/api-bn.md new file mode 100644 index 00000000000..d957e43768e --- /dev/null +++ b/book/src/api-bn.md @@ -0,0 +1,130 @@ +# Beacon Node API + +Lighthouse implements the standard [Eth2 Beacon Node API +specification][OpenAPI]. Please follow that link for a full description of each API endpoint. 
+ +> **Warning:** the standard API specification is still in flux and the Lighthouse implementation is partially incomplete. You can track the status of each endpoint at [#1434](https://github.com/sigp/lighthouse/issues/1434). + +## Starting the server + +A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5052`. + +The following CLI flags control the HTTP server: + +- `--http`: enable the HTTP server (required even if the following flags are + provided). +- `--http-port`: specify the listen port of the server. +- `--http-address`: specify the listen address of the server. +- `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` + header. The default is to not supply a header. + +The schema of the API aligns with the standard Eth2 Beacon Node API as defined +at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). +An interactive specification is available [here][OpenAPI]. + +### CLI Example + +Start the beacon node with the HTTP server listening on [http://localhost:5052](http://localhost:5052): + +```bash +lighthouse bn --http +``` + +## HTTP Request/Response Examples + +This section contains some simple examples of using the HTTP API via `curl`. +All endpoints are documented in the [Eth2 Beacon Node API +specification][OpenAPI]. + +### View the head of the beacon chain + +Returns the block header at the head of the canonical chain. 
+ +```bash +curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: +application/json" +``` + +```json +{ + "data": { + "root": "0x4381454174fc28c7095077e959dcab407ae5717b5dca447e74c340c1b743d7b2", + "canonical": true, + "header": { + "message": { + "slot": 3199, + "proposer_index": "19077", + "parent_root": "0xf1934973041c5896d0d608e52847c3cd9a5f809c59c64e76f6020e3d7cd0c7cd", + "state_root": "0xe8e468f9f5961655dde91968f66480868dab8d4147de9498111df2b7e4e6fe60", + "body_root": "0x6f183abc6c4e97f832900b00d4e08d4373bfdc819055d76b0f4ff850f559b883" + }, + "signature": "0x988064a2f9cf13fe3aae051a3d85f6a4bca5a8ff6196f2f504e32f1203b549d5f86a39c6509f7113678880701b1881b50925a0417c1c88a750c8da7cd302dda5aabae4b941e3104d0cf19f5043c4f22a7d75d0d50dad5dbdaf6991381dc159ab" + } + } +} +``` + +### View the status of a validator + +Shows the status of validator at index `1` at the `head` state. + +```bash +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" +``` + +```json +{ + "data": { + "index": "1", + "balance": "63985937939", + "status": "Active", + "validator": { + "pubkey": "0x873e73ee8b3e4fcf1d2fb0f1036ba996ac9910b5b348f6438b5f8ef50857d4da9075d0218a9d1b99a9eae235a39703e1", + "withdrawal_credentials": "0x00b8cdcf79ba7e74300a07e9d8f8121dd0d8dd11dcfd6d3f2807c45b426ac968", + "effective_balance": 32000000000, + "slashed": false, + "activation_eligibility_epoch": 0, + "activation_epoch": 0, + "exit_epoch": 18446744073709552000, + "withdrawable_epoch": 18446744073709552000 + } + } +} +``` + +## Troubleshooting + +### HTTP API is unavailable or refusing connections + +Ensure the `--http` flag has been supplied at the CLI. 
+ +You can quickly check that the HTTP endpoint is up using `curl`: + +```bash +curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" +``` + +The beacon node should respond with its version: + +```json +{"data":{"version":"Lighthouse/v0.2.9-6f7b4768a/x86_64-linux"}} +``` + +If this doesn't work, the server might not be started or there might be a +network connection error. + +### I cannot query my node from a web browser (e.g., Swagger) + +By default, the API does not provide an `Access-Control-Allow-Origin` header, +which causes browsers to reject responses with a CORS error. + +The `--http-allow-origin` flag can be used to add a wild-card CORS header: + +```bash +lighthouse bn --http --http-allow-origin "*" +``` + +> **Warning:** Adding the wild-card allow-origin flag can pose a security risk. +> Only use it in production if you understand the risks of a loose CORS policy. + +[OpenAPI]: https://ethereum.github.io/eth2.0-APIs/#/ diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md new file mode 100644 index 00000000000..3f37673fa9d --- /dev/null +++ b/book/src/api-lighthouse.md @@ -0,0 +1,179 @@ +# Lighthouse Non-Standard APIs + +Lighthouse fully supports the standardization efforts at +[github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs), +however sometimes development requires additional endpoints that shouldn't +necessarily be defined as a broad-reaching standard. Such endpoints are placed +behind the `/lighthouse` path. + +The endpoints behind the `/lighthouse` path are: + +- Not intended to be stable. +- Not guaranteed to be safe. +- For testing and debugging purposes only. + +Although we don't recommend that users rely on these endpoints, we +document them briefly so they can be utilized by developers and +researchers. 
+ +### `/lighthouse/health` + +*Presently only available on Linux.* + +```bash +curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "pid": 1728254, + "pid_num_threads": 47, + "pid_mem_resident_set_size": 510054400, + "pid_mem_virtual_memory_size": 3963158528, + "sys_virt_mem_total": 16715530240, + "sys_virt_mem_available": 4065374208, + "sys_virt_mem_used": 11383402496, + "sys_virt_mem_free": 1368662016, + "sys_virt_mem_percent": 75.67906, + "sys_loadavg_1": 4.92, + "sys_loadavg_5": 5.53, + "sys_loadavg_15": 5.58 + } +} +``` + +### `/lighthouse/syncing` + +```bash +curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "SyncingFinalized": { + "start_slot": 3104, + "head_slot": 343744, + "head_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + } + } +} +``` + +### `/lighthouse/peers` + +```bash +curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq +``` + +```json +[ + { + "peer_id": "16Uiu2HAmA9xa11dtNv2z5fFbgF9hER3yq35qYNTPvN7TdAmvjqqv", + "peer_info": { + "_status": "Healthy", + "score": { + "score": 0 + }, + "client": { + "kind": "Lighthouse", + "version": "v0.2.9-1c9a055c", + "os_version": "aarch64-linux", + "protocol_version": "lighthouse/libp2p", + "agent_string": "Lighthouse/v0.2.9-1c9a055c/aarch64-linux" + }, + "connection_status": { + "status": "disconnected", + "connections_in": 0, + "connections_out": 0, + "last_seen": 1082, + "banned_ips": [] + }, + "listening_addresses": [ + "/ip4/80.109.35.174/tcp/9000", + "/ip4/127.0.0.1/tcp/9000", + "/ip4/192.168.0.73/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip6/::1/tcp/9000" + ], + "sync_status": { + "Advanced": { + "info": { + "status_head_slot": 343829, + "status_head_root": "0xe34e43efc2bb462d9f364bc90e1f7f0094e74310fd172af698b5a94193498871", + "status_finalized_epoch": 10742, + "status_finalized_root": 
"0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + } + } + }, + "meta_data": { + "seq_number": 160, + "attnets": "0x0000000800000080" + } + } + } +] +``` + +### `/lighthouse/peers/connected` + +```bash +curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq +``` + +```json +[ + { + "peer_id": "16Uiu2HAkzJC5TqDSKuLgVUsV4dWat9Hr8EjNZUb6nzFb61mrfqBv", + "peer_info": { + "_status": "Healthy", + "score": { + "score": 0 + }, + "client": { + "kind": "Lighthouse", + "version": "v0.2.8-87181204+", + "os_version": "x86_64-linux", + "protocol_version": "lighthouse/libp2p", + "agent_string": "Lighthouse/v0.2.8-87181204+/x86_64-linux" + }, + "connection_status": { + "status": "connected", + "connections_in": 1, + "connections_out": 0, + "last_seen": 0, + "banned_ips": [] + }, + "listening_addresses": [ + "/ip4/34.204.178.218/tcp/9000", + "/ip4/127.0.0.1/tcp/9000", + "/ip4/172.31.67.58/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip6/::1/tcp/9000" + ], + "sync_status": "Unknown", + "meta_data": { + "seq_number": 1819, + "attnets": "0xffffffffffffffff" + } + } + } +] +``` + +### `/lighthouse/proto_array` + +```bash +curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: application/json" | jq +``` + +*Example omitted for brevity.* + +### `/lighthouse/validator_inclusion/{epoch}/{validator_id}` + +See [Validator Inclusion APIs](./validator-inclusion.md). + +### `/lighthouse/validator_inclusion/{epoch}/global` + +See [Validator Inclusion APIs](./validator-inclusion.md). diff --git a/book/src/api-vc.md b/book/src/api-vc.md new file mode 100644 index 00000000000..e120f69bf5c --- /dev/null +++ b/book/src/api-vc.md @@ -0,0 +1,3 @@ +# Validator Client API + +The validator client API is planned for release in late September 2020. 
diff --git a/book/src/api.md b/book/src/api.md index 0fa6c300129..56c1ff5ce00 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -1,13 +1,9 @@ # APIs -The Lighthouse `beacon_node` provides two APIs for local consumption: +Lighthouse allows users to query the state of Eth2.0 using web-standard, +RESTful HTTP/JSON APIs. -- A [RESTful JSON HTTP API](http.html) which provides beacon chain, node and network - information. -- A read-only [WebSocket API](websockets.html) providing beacon chain events, as they occur. +There are two APIs served by Lighthouse: - -## Security - -These endpoints are not designed to be exposed to the public Internet or -untrusted users. They may pose a considerable DoS attack vector when used improperly. +- [Beacon Node API](./api-bn.md) +- [Validator Client API](./api-vc.md) (not yet released). diff --git a/book/src/http.md b/book/src/http.md index e07440e8da8..700535c2ac2 100644 --- a/book/src/http.md +++ b/book/src/http.md @@ -1,5 +1,9 @@ # HTTP API +[OpenAPI Specification](https://ethereum.github.io/eth2.0-APIs/#/) + +## Beacon Node + A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `localhost:5052`. The following CLI flags control the HTTP server: @@ -9,24 +13,10 @@ The following CLI flags control the HTTP server: - `--http-port`: specify the listen port of the server. - `--http-address`: specify the listen address of the server. -The API is logically divided into several core endpoints, each documented in -detail: - -Endpoint | Description | -| --- | -- | -[`/node`](./http/node.md) | General information about the beacon node. -[`/beacon`](./http/beacon.md) | General information about the beacon chain. -[`/validator`](./http/validator.md) | Provides functionality to validator clients. -[`/consensus`](./http/consensus.md) | Proof-of-stake voting statistics. -[`/network`](./http/network.md) | Information about the p2p network. 
-[`/spec`](./http/spec.md) | Information about the specs that the client is running. -[`/advanced`](./http/advanced.md) | Provides endpoints for advanced inspection of Lighthouse specific objects. -[`/lighthouse`](./http/lighthouse.md) | Provides lighthouse specific endpoints. - -_Please note: The OpenAPI format at -[SwaggerHub: Lighthouse REST -API](https://app.swaggerhub.com/apis-docs/spble/lighthouse_rest_api/0.2.0) has -been **deprecated**. This documentation is now the source of truth for the REST API._ +The schema of the API aligns with the standard Eth2 Beacon Node API as defined +at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). +It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is +available [here](https://ethereum.github.io/eth2.0-APIs/#/). ## Troubleshooting diff --git a/book/src/http/advanced.md b/book/src/http/advanced.md deleted file mode 100644 index 822b6ffffd6..00000000000 --- a/book/src/http/advanced.md +++ /dev/null @@ -1,115 +0,0 @@ -# Lighthouse REST API: `/advanced` - -The `/advanced` endpoints provide information Lighthouse specific data structures for advanced debugging. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/advanced/fork_choice`](#advancedfork_choice) | Get the `proto_array` fork choice object. -[`/advanced/operation_pool`](#advancedoperation_pool) | Get the Lighthouse `PersistedOperationPool` object. - - -## `/advanced/fork_choice` - -Requests the `proto_array` fork choice object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/fork_choice` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "prune_threshold": 256, - "justified_epoch": 25, - "finalized_epoch": 24, - "nodes": [ - { - "slot": 544, - "root": "0x27103c56d4427cb4309dd202920ead6381d54d43277c29cf0572ddf0d528e6ea", - "parent": null, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 1, - "best_descendant": 296 - }, - { - "slot": 545, - "root": "0x09af0e8d4e781ea4280c9c969d168839c564fab3a03942e7db0bfbede7d4c745", - "parent": 0, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 2, - "best_descendant": 296 - }, - ], - "indices": { - "0xb935bb3651eeddcb2d2961bf307156850de982021087062033f02576d5df00a3": 59, - "0x8f4ec47a34c6c1d69ede64d27165d195f7e2a97c711808ce51f1071a6e12d5b9": 189, - "0xf675eba701ef77ee2803a130dda89c3c5673a604d2782c9e25ea2be300d7d2da": 173, - "0x488a483c8d5083faaf5f9535c051b9f373ba60d5a16e77ddb1775f248245b281": 37 - } -} -``` -_Truncated for brevity._ - -## `/advanced/operation_pool` - -Requests the `PersistedOperationPool` object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/operation_pool` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "attestations": [ - [ - { - "v": [39, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 118, 215, 252, 51, 186, 76, 156, 157, 99, 91, 4, 137, 195, 209, 224, 26, 233, 233, 184, 38, 89, 215, 177, 247, 97, 243, 119, 229, 69, 50, 90, 24, 0, 0, 0, 0, 0, 0, 0, 79, 37, 38, 210, 96, 235, 121, 142, 129, 136, 206, 214, 179, 132, 22, 19, 222, 213, 203, 46, 112, 192, 26, 5, 254, 26, 103, 170, 158, 205, 72, 3, 25, 0, 0, 0, 0, 0, 0, 0, 164, 50, 214, 67, 98, 13, 50, 180, 108, 232, 248, 109, 128, 45, 177, 23, 221, 24, 218, 211, 8, 152, 172, 120, 24, 86, 198, 103, 68, 164, 67, 202, 1, 0, 0, 0, 0, 0, 0, 0] - }, - [ - { - "aggregation_bits": "0x03", - "data": { - "slot": 807, - "index": 0, - "beacon_block_root": "0x7076d7fc33ba4c9c9d635b0489c3d1e01ae9e9b82659d7b1f761f377e545325a", - "source": { - "epoch": 24, - "root": "0x4f2526d260eb798e8188ced6b3841613ded5cb2e70c01a05fe1a67aa9ecd4803" - }, - "target": { - "epoch": 25, - "root": "0xa432d643620d32b46ce8f86d802db117dd18dad30898ac781856c66744a443ca" - } - }, - "signature": "0x8b1d624b0cd5a7a0e13944e90826878a230e3901db34ea87dbef5b145ade2fedbc830b6752a38a0937a1594211ab85b615d65f9eef0baccd270acca945786036695f4db969d9ff1693c505c0fe568b2fe9831ea78a74cbf7c945122231f04026" - } - ] - ] - ], - "attester_slashings": [], - "proposer_slashings": [], - "voluntary_exits": [] -} -``` -_Truncated for brevity._ diff --git a/book/src/http/beacon.md b/book/src/http/beacon.md deleted file mode 100644 index 2149f444448..00000000000 --- a/book/src/http/beacon.md +++ /dev/null @@ -1,784 +0,0 @@ -# Lighthouse REST API: `/beacon` - -The `/beacon` endpoints provide information about the canonical head of the -beacon chain and also historical information about beacon blocks and states. 
- -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/beacon/head`](#beaconhead) | Info about the block at the head of the chain. -[`/beacon/heads`](#beaconheads) | Returns a list of all known chain heads. -[`/beacon/block`](#beaconblock) | Get a `BeaconBlock` by slot or root. -[`/beacon/block_root`](#beaconblock_root) | Resolve a slot to a block root. -[`/beacon/fork`](#beaconfork) | Get the fork of the head of the chain. -[`/beacon/genesis_time`](#beacongenesis_time) | Get the genesis time from the beacon state. -[`/beacon/genesis_validators_root`](#beacongenesis_validators_root) | Get the genesis validators root. -[`/beacon/validators`](#beaconvalidators) | Query for one or more validators. -[`/beacon/validators/all`](#beaconvalidatorsall) | Get all validators. -[`/beacon/validators/active`](#beaconvalidatorsactive) | Get all active validators. -[`/beacon/state`](#beaconstate) | Get a `BeaconState` by slot or root. -[`/beacon/state_root`](#beaconstate_root) | Resolve a slot to a state root. -[`/beacon/state/genesis`](#beaconstategenesis) | Get a `BeaconState` at genesis. -[`/beacon/committees`](#beaconcommittees) | Get the shuffling for an epoch. -[`/beacon/proposer_slashing`](#beaconproposer_slashing) | Insert a proposer slashing -[`/beacon/attester_slashing`](#beaconattester_slashing) | Insert an attester slashing - -## `/beacon/head` - -Requests information about the head of the beacon chain, from the node's -perspective. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/head` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "slot": 37923, - "block_root": "0xe865d4805395a0776b8abe46d714a9e64914ab8dc5ff66624e5a1776bcc1684b", - "state_root": "0xe500e3567ab273c9a6f8a057440deff476ab236f0983da27f201ee9494a879f0", - "finalized_slot": 37856, - "finalized_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86", - "justified_slot": 37888, - "justified_block_root": "0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4", - "previous_justified_slot": 37856, - "previous_justified_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86" -} -``` - -## `/beacon/heads` - -Returns the roots of all known head blocks. Only one of these roots is the -canonical head and that is decided by the fork choice algorithm. See [`/beacon/head`](#beaconhead) for the canonical head. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/heads` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - { - "beacon_block_root": "0x226b2fd7c5f3d31dbb21444b96dfafe715f0017cd16545ecc4ffa87229496a69", - "beacon_block_slot": 38373 - }, - { - "beacon_block_root": "0x41ed5b253c4fc841cba8a6d44acbe101866bc674c3cfa3c4e9f7388f465aa15b", - "beacon_block_slot": 38375 - } -] -``` - -## `/beacon/block` - -Request that the node return a beacon chain block that matches the provided -criteria (a block `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any block returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned block is not required to be in the -canonical chain. - -### Returns - -Returns an object containing a single [`SignedBeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock) and the block root of the inner [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock). - -### Example Response - -```json -{ - "root": "0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196", - "beacon_block": { - "message": { - "slot": 0, - "proposer_index": 14, - "parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f", - "body": { - "randao_reveal": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "eth1_data": { - "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "deposit_count": 0, - "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } - }, - "signature": 
"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } -} -``` - -## `/beacon/block_root` - -Returns the block root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196" -``` - -## `/beacon/committees` - -Request the committees (a.k.a. "shuffling") for all slots and committee indices -in a given `epoch`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/committees` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200/500 - -### Parameters - -The `epoch` (`Epoch`) query parameter is required and defines the epoch for -which the committees will be returned. All slots contained within the response will -be inside this epoch. - -### Returns - -A list of beacon committees. - -### Example Response - -```json -[ - { - "slot": 4768, - "index": 0, - "committee": [ - 1154, - 492, - 9667, - 3089, - 8987, - 1421, - 224, - 11243, - 2127, - 2329, - 188, - 482, - 486 - ] - }, - { - "slot": 4768, - "index": 1, - "committee": [ - 5929, - 8482, - 5528, - 6130, - 14343, - 9777, - 10808, - 12739, - 15234, - 12819, - 5423, - 6320, - 9991 - ] - } -] -``` - -_Truncated for brevity._ - -## `/beacon/fork` - -Request that the node return the `fork` of the current head. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/fork` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#fork) of the current head. - -### Example Response - -```json -{ - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 -} -``` - -## `/beacon/genesis_time` - -Request that the node return the genesis time from the beacon state. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_time` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis time. - -### Example Response - -```json -1581576353 -``` - -## `/beacon/genesis_validators_root` - -Request that the node return the genesis validators root from the beacon state. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_validators_root` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis validators root. - -### Example Response - -```json -0x4fbf23439a7a9b9dd91650e64e8124012dde5e2ea2940c552b86f04eb47f95de -``` - -## `/beacon/validators` - -Request that the node returns information about one or more validator public -keys. This request takes the form of a `POST` request to allow sending a large -number of pubkeys in the request. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - state_root: Bytes32, - pubkeys: [PublicKey] -} -``` - -The `state_root` field indicates which `BeaconState` should be used to collect -the information. The `state_root` is optional and omitting it will result in -the canonical head state being used. - - -### Returns - -Returns an object describing several aspects of the given validator. - -### Example - -### Request Body - -```json -{ - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -### Response Body - -```json -[ - { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "balance": 3228885987, - "validator": { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "withdrawal_credentials": "0x00b7bec22d5bda6b2cca1343d4f640d0e9ccc204a06a73703605c590d4c0d28e", - "effective_balance": 3200000000, - "slashed": false, - "activation_eligibility_epoch": 0, - "activation_epoch": 0, - "exit_epoch": 18446744073709551615, - "withdrawable_epoch": 18446744073709551615 - } - }, - { - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "balance": null, - "validator": null - } -] -``` - -## `/beacon/validators/all` - -Returns all validators. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/all` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. - - -## `/beacon/validators/active` - -Returns all validators that are active in the state defined by `state_root`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/active` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. - - -## `/beacon/state` - -Request that the node return a beacon chain state that matches the provided -criteria (a state `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any state returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned state is not required to be in the -canonical chain. 
- -### Returns - -Returns an object containing a single -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate) -and its tree hash root. - -### Example Response - -```json -{ - "root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b", - "beacon_state": { - "genesis_time": 1575652800, - "genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538", - "slot": 18478 - } -} -``` - -_Truncated for brevity._ - -## `/beacon/state_root` - -Returns the state root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f" -``` - -## `/beacon/state/genesis` - -Request that the node return a beacon chain state at genesis (slot 0). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/genesis` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate). - -### Example Response - -```json -{ - "genesis_time": 1581576353, - "slot": 0, - "fork": { - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 - }, -} -``` - -_Truncated for brevity._ - - -## `/beacon/state/committees` - -Request that the node return a beacon chain state at genesis (slot 0). 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/genesis` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - - -### Returns - -Returns an object containing the committees for a given epoch. - -### Example Response - -```json -[ - {"slot":64,"index":0,"committee":[]}, - {"slot":65,"index":0,"committee":[3]}, - {"slot":66,"index":0,"committee":[]}, - {"slot":67,"index":0,"committee":[14]}, - {"slot":68,"index":0,"committee":[]}, - {"slot":69,"index":0,"committee":[9]}, - {"slot":70,"index":0,"committee":[]}, - {"slot":71,"index":0,"committee":[11]}, - {"slot":72,"index":0,"committee":[]}, - {"slot":73,"index":0,"committee":[5]}, - {"slot":74,"index":0,"committee":[]}, - {"slot":75,"index":0,"committee":[15]}, - {"slot":76,"index":0,"committee":[]}, - {"slot":77,"index":0,"committee":[0]} -] -``` - -_Truncated for brevity._ - - -## `/beacon/attester_slashing` - -Accepts an `attester_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `attester_slashing` is invalid. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/attester_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Parameters - -Expects the following object in the POST request body: - -``` -{ - attestation_1: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - }, - attestation_2: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the attester slashing was inserted successfully, or the corresponding error if it failed. - -### Example - -### Request Body - -```json -{ - "attestation_1": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - } - }, - "signature": "0xb47f7397cd944b8d5856a13352166bbe74c85625a45b14b7347fc2c9f6f6f82acee674c65bc9ceb576fcf78387a6731c0b0eb3f8371c70db2da4e7f5dfbc451730c159d67263d3db56b6d0e009e4287a8ba3efcacac30b3ae3447e89dc71b5b9" - }, - "attestation_2": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000200000000000000" - } - }, - 
"signature": "0x93fef587a63acf72aaf8df627718fd43cb268035764071f802ffb4370a2969d226595cc650f4c0bf2291ae0c0a41fcac1700f318603d75d34bcb4b9f4a8368f61eeea0e1f5d969d92d5073ba5fbadec102b45ec87d418d25168d2e3c74b9fcbb" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - - - -## `/beacon/proposer_slashing` - -Accepts a `proposer_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns an 400 error if the `proposer_slashing` is invalid. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/proposer_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - proposer_index: u64, - header_1: { - slot: Slot, - parent_root: Bytes32, - state_root: Bytes32, - body_root: Bytes32, - signature: Bytes32 - }, - header_2: { - slot: Slot, - parent_root: Bytes32, - state_root: Bytes32, - body_root: Bytes32, - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the proposer slashing was inserted successfully, or the corresponding error if it failed. 
- -### Example - -### Request Body - -```json -{ - "proposer_index": 0, - "header_1": { - "slot": 0, - "parent_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "signature": "0xb8970d1342c6d5779c700ec366efd0ca819937ca330960db3ca5a55eb370a3edd83f4cbb2f74d06e82f934fcbd4bb80609a19c2254cc8b3532a4efff9e80edf312ac735757c059d77126851e377f875593e64ba50d1dffe69a809a409202dd12" - }, - "header_2": { - "slot": 0, - "parent_root": "0x0202020202020202020202020202020202020202020202020202020202020202", - "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "signature": "0xb60e6b348698a34e59b22e0af96f8809f977f00f95d52375383ade8d22e9102270a66c6d52b0434214897e11ca4896871510c01b3fd74d62108a855658d5705fcfc4ced5136264a1c6496f05918576926aa191b1ad311b7e27f5aa2167aba294" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - - - - - diff --git a/book/src/http/lighthouse.md b/book/src/http/lighthouse.md deleted file mode 100644 index d80c0f694a5..00000000000 --- a/book/src/http/lighthouse.md +++ /dev/null @@ -1,182 +0,0 @@ -# Lighthouse REST API: `/lighthouse` - -The `/lighthouse` endpoints provide lighthouse-specific information about the beacon node. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/lighthouse/syncing`](#lighthousesyncing) | Get the node's syncing status -[`/lighthouse/peers`](#lighthousepeers) | Get the peers info known by the beacon node -[`/lighthouse/connected_peers`](#lighthousepeers) | Get the connected_peers known by the beacon node - -## `/lighthouse/syncing` - -Requests the syncing state of a Lighthouse beacon node. Lighthouse as a -custom sync protocol, this request gets Lighthouse-specific sync information. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/lighthouse/syncing` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -If the node is undergoing a finalization sync: -```json -{ - "SyncingFinalized": { - "start_slot": 10, - "head_slot": 20, - "head_root":"0x74020d0e3c3c02d2ea6279d5760f7d0dd376c4924beaaec4d5c0cefd1c0c4465" - } -} -``` - -If the node is undergoing a head chain sync: -```json -{ - "SyncingHead": { - "start_slot":0, - "head_slot":1195 - } -} -``` - -If the node is synced -```json -{ -"Synced" -} -``` - -## `/lighthouse/peers` - -Get all known peers info from the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/lighthouse/peers` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ -{ - "peer_id" : "16Uiu2HAmTEinipUS3haxqucrn7d7SmCKx5XzAVbAZCiNW54ncynG", - "peer_info" : { - "_status" : "Healthy", - "client" : { - "agent_string" : "github.com/libp2p/go-libp2p", - "kind" : "Prysm", - "os_version" : "unknown", - "protocol_version" : "ipfs/0.1.0", - "version" : "unknown" - }, - "connection_status" : { - "Disconnected" : { - "since" : 3 - } - }, - "listening_addresses" : [ - "/ip4/10.3.58.241/tcp/9001", - "/ip4/35.172.14.146/tcp/9001", - "/ip4/35.172.14.146/tcp/9001" - ], - "meta_data" : { - "attnets" : "0x0000000000000000", - "seq_number" : 0 - }, - "reputation" : 20, - "sync_status" : { - "Synced" : { - "status_head_slot" : 18146 - } - } - } - }, - { - "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", - "peer_info" : { - "_status" : "Healthy", - "client" : { - "agent_string" : null, - "kind" : "Unknown", - "os_version" : "unknown", - "protocol_version" : "unknown", - "version" : "unknown" - }, - "connection_status" : { - "Disconnected" : { - "since" : 5 - } - }, - "listening_addresses" : [], - "meta_data" : { - "attnets" : 
"0x0900000000000000", - "seq_number" : 0 - }, - "reputation" : 20, - "sync_status" : "Unknown" - } - }, -] -``` - -## `/lighthouse/connected_peers` - -Get all known peers info from the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/lighthouse/connected_peers` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - { - "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", - "peer_info" : { - "_status" : "Healthy", - "client" : { - "agent_string" : null, - "kind" : "Unknown", - "os_version" : "unknown", - "protocol_version" : "unknown", - "version" : "unknown" - }, - "connection_status" : { - "Connected" : { - "in" : 5, - "out" : 2 - } - }, - "listening_addresses" : [], - "meta_data" : { - "attnets" : "0x0900000000000000", - "seq_number" : 0 - }, - "reputation" : 20, - "sync_status" : "Unknown" - } - }, - ] -``` diff --git a/book/src/http/network.md b/book/src/http/network.md deleted file mode 100644 index 2ac0c83ba49..00000000000 --- a/book/src/http/network.md +++ /dev/null @@ -1,148 +0,0 @@ -# Lighthouse REST API: `/network` - -The `/network` endpoints provide information about the p2p network that -Lighthouse uses to communicate with other beacon nodes. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/network/enr`](#networkenr) | Get the local node's `ENR` as base64 . -[`/network/peer_count`](#networkpeer_count) | Get the count of connected peers. -[`/network/peer_id`](#networkpeer_id) | Get a node's libp2p `PeerId`. -[`/network/peers`](#networkpeers) | List a node's connected peers (as `PeerIds`). -[`/network/listen_port`](#networklisten_port) | Get a node's libp2p listening port. -[`/network/listen_addresses`](#networklisten_addresses) | Get a list of libp2p multiaddr the node is listening on. - -## `network/enr` - -Requests the beacon node for its listening `ENR` address. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/enr` -Method | GET -JSON Encoding | String (base64) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"-IW4QPYyGkXJSuJ2Eji8b-m4PTNrW4YMdBsNOBrYAdCk8NLMJcddAiQlpcv6G_hdNjiLACOPTkqTBhUjnC0wtIIhyQkEgmlwhKwqAPqDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhA1sBKo0yCfw4Z_jbggwflNfftjwKACu-a-CoFAQHJnrm" -``` - -## `/network/peer_count` - -Requests the count of peers connected to the client. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_count` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -5 -``` -## `/network/peer_id` - -Requests the beacon node's local `PeerId`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_id` -Method | GET -JSON Encoding | String (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"QmVFcULBYZecPdCKgGmpEYDqJLqvMecfhJadVBtB371Avd" -``` - -## `/network/peers` - -Requests one `MultiAddr` for each peer connected to the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peers` -Method | GET -JSON Encoding | [String] (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "QmaPGeXcfKFMU13d8VgbnnpeTxcvoFoD9bUpnRGMUJ1L9w", - "QmZt47cP8V96MgiS35WzHKpPbKVBMqr1eoBNTLhQPqpP3m" -] -``` - - -## `/network/listen_port` - -Requests the TCP port that the client's libp2p service is listening on. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_port` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -9000 -``` - -## `/network/listen_addresses` - -Requests the list of multiaddr that the client's libp2p service is listening on. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_addresses` -Method | GET -JSON Encoding | Array -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "/ip4/127.0.0.1/tcp/9000", - "/ip4/192.168.31.115/tcp/9000", - "/ip4/172.24.0.1/tcp/9000", - "/ip4/172.21.0.1/tcp/9000", - "/ip4/172.17.0.1/tcp/9000", - "/ip4/172.18.0.1/tcp/9000", - "/ip4/172.19.0.1/tcp/9000", - "/ip4/172.42.0.1/tcp/9000", - "/ip6/::1/tcp/9000" -] -``` diff --git a/book/src/http/node.md b/book/src/http/node.md deleted file mode 100644 index ae370cbe981..00000000000 --- a/book/src/http/node.md +++ /dev/null @@ -1,91 +0,0 @@ -# Lighthouse REST API: `/node` - -The `/node` endpoints provide information about the lighthouse beacon node. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/node/version`](#nodeversion) | Get the node's version. -[`/node/syncing`](#nodesyncing) | Get the node's syncing status. -[`/node/health`](#nodehealth) | Get the node's health. - -## `/node/version` - -Requests the beacon node's version. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/version` -Method | GET -JSON Encoding | String -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"Lighthouse-0.2.0-unstable" -``` - -## `/node/syncing` - -Requests the syncing status of the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/syncing` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - is_syncing: true, - sync_status: { - starting_slot: 0, - current_slot: 100, - highest_slot: 200, - } -} -``` - -## `/node/health` - -Requests information about the health of the beacon node. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/health` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "pid": 96160, - "pid_num_threads": 30, - "pid_mem_resident_set_size": 55476224, - "pid_mem_virtual_memory_size": 2081382400, - "sys_virt_mem_total": 16721076224, - "sys_virt_mem_available": 7423197184, - "sys_virt_mem_used": 8450183168, - "sys_virt_mem_free": 3496345600, - "sys_virt_mem_percent": 55.605743, - "sys_loadavg_1": 1.56, - "sys_loadavg_5": 2.61, - "sys_loadavg_15": 2.43 -} -``` diff --git a/book/src/http/spec.md b/book/src/http/spec.md deleted file mode 100644 index 619a1d4e362..00000000000 --- a/book/src/http/spec.md +++ /dev/null @@ -1,154 +0,0 @@ -# Lighthouse REST API: `/spec` - -The `/spec` endpoints provide information about Eth2.0 specifications that the node is running. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/spec`](#spec) | Get the full spec object that a node's running. -[`/spec/slots_per_epoch`](#specslots_per_epoch) | Get the number of slots per epoch. -[`/spec/eth2_config`](#specseth2_config) | Get the full Eth2 config object. - -## `/spec` - -Requests the full spec object that a node's running. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "genesis_delay": 172800, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "shard_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 -} -``` - -## `/spec/eth2_config` - -Requests the full `Eth2Config` object. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/eth2_config` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "spec_constants": "mainnet", - "spec": { - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "genesis_delay": 172800, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "shard_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 - } -} -``` - -## `/spec/slots_per_epoch` - -Requests the `SLOTS_PER_EPOCH` parameter from the specs that the node is running. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/slots_per_epoch` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -32 -``` \ No newline at end of file diff --git a/book/src/http/validator.md b/book/src/http/validator.md deleted file mode 100644 index eff0c609501..00000000000 --- a/book/src/http/validator.md +++ /dev/null @@ -1,545 +0,0 @@ -# Lighthouse REST API: `/validator` - -The `/validator` endpoints provide the minimum functionality required for a validator -client to connect to the beacon node and produce blocks and attestations. - -## Endpoints - -HTTP Path | HTTP Method | Description | -| - | - | ---- | -[`/validator/duties`](#validatorduties) | POST | Provides block and attestation production information for validators. -[`/validator/subscribe`](#validatorsubscribe) | POST | Subscribes a list of validators to the beacon node for a particular duty/slot. -[`/validator/duties/all`](#validatordutiesall) | GET |Provides block and attestation production information for all validators. -[`/validator/duties/active`](#validatordutiesactive) | GET | Provides block and attestation production information for all active validators. -[`/validator/block`](#validatorblock-get) | GET | Retrieves the current beacon block for the validator to publish. -[`/validator/block`](#validatorblock-post) | POST | Publishes a signed block to the network. -[`/validator/attestation`](#validatorattestation) | GET | Retrieves the current best attestation for a validator to publish. -[`/validator/aggregate_attestation`](#validatoraggregate_attestation) | GET | Gets an aggregate attestation for validators to sign and publish. -[`/validator/attestations`](#validatorattestations) | POST | Publishes a list of raw unaggregated attestations to their appropriate subnets. 
-[`/validator/aggregate_and_proofs`](#validatoraggregate_and_proofs) | POST | Publishes a list of Signed aggregate and proofs for validators who are aggregators. - -## `/validator/duties` - -Request information about when a validator must produce blocks and attestations -at some given `epoch`. The information returned always refers to the canonical -chain and the same input parameters may yield different results after a re-org. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -Duties are assigned on a per-epoch basis, all duties returned will contain -slots that are inside the given `epoch`. A set of duties will be returned for -each of the `pubkeys`. - -Validators who are not known to the beacon chain (e.g., have not yet deposited) -will have `null` values for most fields. - - -### Returns - -A set of duties for each given pubkey. 
- -### Example - -#### Request Body - -```json -{ - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "validator_pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "attestation_slot": 38511, - "attestation_committee_index": 3, - "attestation_committee_position": 39, - "block_proposal_slots": [], - "aggregator_modulo": 5, - }, - { - "validator_pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "attestation_slot": null, - "attestation_committee_index": null, - "attestation_committee_position": null, - "block_proposal_slots": [] - "aggregator_modulo": null, - } -] -``` - -## `/validator/duties/all` - -Returns the duties for all validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys. - -Considering that duties for non-active validators will just be `null`, it is -generally more efficient to query using [Active Validator -Duties](#active-validator-duties). - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/all` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. 
- -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/duties/active` - -Returns the duties for all active validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys that -are active in the given epoch. - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/active` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. - -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/subscribe` - -Posts a list of `ValidatorSubscription` to subscribe validators to -particular slots to perform attestation duties. - -This informs the beacon node to search for peers and subscribe to -required attestation subnets to perform the attestation duties required. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/subscribe` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -[ - { - validator_index: 10, - attestation_committee_index: 12, - slot: 3, - is_aggregator: true - } -] -``` - -The `is_aggregator` informs the beacon node if the validator is an aggregator -for this slot/committee. - -### Returns - -A null object on success and an error indicating any failures. - -## `/validator/block` GET - - -Produces and returns an unsigned `BeaconBlock` object. 
- -The block will be produced with the given `slot` and the parent block will be the -highest block in the canonical chain that has a slot less than `slot`. The -block will still be produced if some other block is also known to be at `slot` -(i.e., it may produce a block that would be slashable if signed). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `randao_reveal` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the block is to be produced. -- `randao_reveal` (`Signature`): 96 bytes `Signature` for the randomness. - - -### Returns - -Returns a `BeaconBlock` object. - -#### Response Body - -```json -{ - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } -} -``` - -## `/validator/block` POST - -Accepts a `SignedBeaconBlock` for verification. If it is valid, it will be -imported into the local database and published on the network. Invalid blocks -will not be published to the network. - -A block may be considered invalid because it is fundamentally incorrect, or its -parent has not yet been imported. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded `SignedBeaconBlock` in the POST request body: - -### Returns - -Returns a null object if the block passed all block validation and is published to the network. -Else, returns a processing error description. - -### Example - -### Request Body - -```json -{ - "message": { - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [ - - ], - "attester_slashings": [ - - ], - "attestations": [ - - ], - "deposits": [ - - ], - "voluntary_exits": [ - - ] - } - }, - "signature": "0x965ced900dbabd0a78b81a0abb5d03407be0d38762104316416347f2ea6f82652b5759396f402e85df8ee18ba2c60145037c73b1c335f4272f1751a1cd89862b7b4937c035e350d0108554bd4a8930437ec3311c801a65fe8e5ba022689b5c24" -} -``` - -## `/validator/attestation` - -Produces and returns an unsigned `Attestation` from the current state. - -The attestation will reference the `beacon_block_root` of the highest block in -the canonical chain with a slot equal to or less than the given `slot`. - -An error will be returned if the given slot is more than -`SLOTS_PER_HISTORICAL_VECTOR` slots behind the current head block. 
- -This endpoint is not protected against slashing. Signing the returned -attestation may result in a slashable offence. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/attestation` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `committee_index` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the attestation is to be produced. -- `committee_index` (`CommitteeIndex`): The index of the committee that makes the attestation. - - -### Returns - -Returns a `Attestation` object with a default signature. The `signature` field should be replaced by the valid signature. - -#### Response Body - -```json -{ - "aggregation_bits": "0x01", - "data": { - "slot": 100, - "index": 0, - "beacon_block_root": "0xf22e4ec281136d119eabcd4d9d248aeacd042eb63d8d7642f73ad3e71f1c9283", - "source": { - "epoch": 2, - "root": "0x34c1244535c923f08e7f83170d41a076e4f1ec61013846b3a615a1d109d3c329" - }, - "target": { - "epoch": 3, - "root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1" - } - }, - "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" -} -``` - - - -## `/validator/aggregate_attestation` - -Requests an `AggregateAttestation` from the beacon node that has a -specific `attestation.data`. If no aggregate attestation is known this will -return a null object. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/aggregate_attestation` -Method | GET -JSON Encoding | Object -Query Parameters | `attestation_data` -Typical Responses | 200 - -### Returns - -Returns a null object if the attestation data passed is not known to the beacon -node. 
- -### Example - -### Request Body - -```json -{ - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" -} -``` - - -## `/validator/attestations` - -Accepts a list of `Attestation` for verification. If they are valid, they will be imported -into the local database and published to the network. Invalid attestations will -not be published to the network. - -An attestation may be considered invalid because it is fundamentally incorrect -or because the beacon node has not imported the relevant blocks required to -verify it. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/attestations` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded list of signed `Attestation` objects in the POST request body. In -accordance with the naive aggregation scheme, the attestation _must_ have -exactly one of the `attestation.aggregation_bits` fields set. - -### Returns - -Returns a null object if the attestation passed all validation and is published to the network. -Else, returns a processing error description. 
- -### Example - -### Request Body - -```json -{ - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" -} -``` - -## `/validator/aggregate_and_proofs` - -Accepts a list of `SignedAggregateAndProof` for publication. If they are valid -(the validator is an aggregator and the signatures can be verified) these -are published to the network on the global aggregate gossip topic. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/aggregate_and_proofs` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - -### Request Body - -Expects a JSON encoded list of `SignedAggregateAndProof` objects in the POST request body. - -### Returns - -Returns a null object if the attestation passed all validation and is published to the network. -Else, returns a processing error description. 
- -### Example - -### Request Body - -```json -[ - { - "message": { - "aggregator_index": 12, - "aggregate": { - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" - }, - "selection_proof": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" - } - signature: "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" - } -] -``` -_Note: The data in this request is for demonstrating types and does not -contain real data_ diff --git a/book/src/http/consensus.md b/book/src/validator-inclusion.md similarity index 52% rename from book/src/http/consensus.md rename to book/src/validator-inclusion.md index c71b78ce3e9..ce8e61cafee 100644 --- a/book/src/http/consensus.md +++ b/book/src/validator-inclusion.md @@ -1,16 +1,21 @@ -# Lighthouse REST API: `/consensus` +# Validator Inclusion APIs -The `/consensus` endpoints provide information on results of the proof-of-stake -voting process used for finality/justification under Casper FFG. +The `/lighthouse/validator_inclusion` API endpoints provide information on +results of the proof-of-stake voting process used for finality/justification +under Casper FFG. 
+ +These endpoints are not stable or included in the Eth2 standard API. As such, +they are subject to change or removal without a change in major release +version. ## Endpoints HTTP Path | Description | | --- | -- | -[`/consensus/global_votes`](#consensusglobal_votes) | A global vote count for a given epoch. -[`/consensus/individual_votes`](#consensusindividual_votes) | A per-validator breakdown of votes in a given epoch. +[`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. +[`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. -## `/consensus/global_votes` +## Global Returns a global count of votes for some given `epoch`. The results are included both for the current and previous (`epoch - 1`) epochs since both are required @@ -75,40 +80,27 @@ voting upon the previous epoch included in a block. When this value is greater than or equal to `2/3` it is possible that the beacon chain may justify and/or finalize the epoch. -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/consensus/global_votes` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -Requires the `epoch` (`Epoch`) query parameter to determine which epoch will be -considered the current epoch. - -### Returns - -A report on global validator voting participation. 
+### HTTP Example -### Example +```bash +curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H "accept: application/json" | jq +``` ```json { - "current_epoch_active_gwei": 52377600000000, - "previous_epoch_active_gwei": 52377600000000, - "current_epoch_attesting_gwei": 50740900000000, - "current_epoch_target_attesting_gwei": 49526000000000, - "previous_epoch_attesting_gwei": 52377600000000, - "previous_epoch_target_attesting_gwei": 51063400000000, - "previous_epoch_head_attesting_gwei": 9248600000000 + "data": { + "current_epoch_active_gwei": 642688000000000, + "previous_epoch_active_gwei": 642688000000000, + "current_epoch_attesting_gwei": 366208000000000, + "current_epoch_target_attesting_gwei": 366208000000000, + "previous_epoch_attesting_gwei": 1000000000, + "previous_epoch_target_attesting_gwei": 1000000000, + "previous_epoch_head_attesting_gwei": 1000000000 + } } ``` -## `/consensus/individual_votes` +## Individual Returns a per-validator summary of how that validator performed during the current epoch. @@ -117,73 +109,26 @@ The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of t individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/consensus/individual_votes` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body -Expects the following object in the POST request body: +### HTTP Example +```bash +curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/42" -H "accept: application/json" | jq ``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -### Returns - -A report on the validators voting participation. 
- -### Example - -#### Request Body ```json { - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] + "data": { + "is_slashed": false, + "is_withdrawable_in_current_epoch": false, + "is_active_in_current_epoch": true, + "is_active_in_previous_epoch": true, + "current_epoch_effective_balance_gwei": 32000000000, + "is_current_epoch_attester": false, + "is_current_epoch_target_attester": false, + "is_previous_epoch_attester": false, + "is_previous_epoch_target_attester": false, + "is_previous_epoch_head_attester": false + } } ``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "epoch": 1203, - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "vote": { - "is_slashed": false, - "is_withdrawable_in_current_epoch": false, - "is_active_in_current_epoch": true, - "is_active_in_previous_epoch": true, - "current_epoch_effective_balance_gwei": 3200000000, - "is_current_epoch_attester": true, - "is_current_epoch_target_attester": true, - "is_previous_epoch_attester": true, - "is_previous_epoch_target_attester": true, - "is_previous_epoch_head_attester": false - } - }, - { - "epoch": 1203, - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "vote": null - } -] -``` diff --git a/book/src/websockets.md b/book/src/websockets.md deleted file mode 100644 index 69cf0e18d69..00000000000 --- a/book/src/websockets.md +++ /dev/null @@ -1,111 +0,0 @@ -# Websocket API - -**Note: the WebSocket server _only_ emits events. It does not accept any -requests. 
Use the [HTTP API](./http.md) for requests.** - -By default, a Lighthouse `beacon_node` exposes a websocket server on `localhost:5053`. - -The following CLI flags control the websocket server: - -- `--no-ws`: disable the websocket server. -- `--ws-port`: specify the listen port of the server. -- `--ws-address`: specify the listen address of the server. - -All clients connected to the websocket server will receive the same stream of events, all triggered -by the `BeaconChain`. Each event is a JSON object with the following schema: - -```json -{ - "event": "string", - "data": "object" -} -``` - -## Events - -The following events may be emitted: - -### Beacon Head Changed - -Occurs whenever the canonical head of the beacon chain changes. - -```json -{ - "event": "beacon_head_changed", - "data": { - "reorg": "boolean", - "current_head_beacon_block_root": "string", - "previous_head_beacon_block_root": "string" - } -} -``` - -### Beacon Finalization - -Occurs whenever the finalized checkpoint of the canonical head changes. - -```json -{ - "event": "beacon_finalization", - "data": { - "epoch": "number", - "root": "string" - } -} -``` - -### Beacon Block Imported - -Occurs whenever the beacon node imports a valid block. - -```json -{ - "event": "beacon_block_imported", - "data": { - "block": "object" - } -} -``` - -### Beacon Block Rejected - -Occurs whenever the beacon node rejects a block because it is invalid or an -error occurred during validation. - -```json -{ - "event": "beacon_block_rejected", - "data": { - "reason": "string", - "block": "object" - } -} -``` - -### Beacon Attestation Imported - -Occurs whenever the beacon node imports a valid attestation. - -```json -{ - "event": "beacon_attestation_imported", - "data": { - "attestation": "object" - } -} -``` - -### Beacon Attestation Rejected - -Occurs whenever the beacon node rejects an attestation because it is invalid or -an error occurred during validation. 
- -```json -{ - "event": "beacon_attestation_rejected", - "data": { - "reason": "string", - "attestation": "object" - } -} -``` diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml new file mode 100644 index 00000000000..f7ccfcf34eb --- /dev/null +++ b/common/eth2/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "eth2" +version = "0.1.0" +authors = ["Paul Hauner <paul@paulhauner.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +serde = { version = "1.0.110", features = ["derive"] } +serde_json = "1.0.52" +types = { path = "../../consensus/types" } +hex = "0.4.2" +reqwest = { version = "0.10.8", features = ["json"] } +eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" } +proto_array = { path = "../../consensus/proto_array", optional = true } +serde_utils = { path = "../../consensus/serde_utils" } + +[target.'cfg(target_os = "linux")'.dependencies] +psutil = { version = "3.1.0", optional = true } +procinfo = { version = "0.4.2", optional = true } + +[features] +default = ["lighthouse"] +lighthouse = ["proto_array", "psutil", "procinfo"] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs new file mode 100644 index 00000000000..b0fbc2566b1 --- /dev/null +++ b/common/eth2/src/lib.rs @@ -0,0 +1,784 @@ +//! This crate provides two major things: +//! +//! 1. The types served by the `http_api` crate. +//! 2. A wrapper around `reqwest` that forms a HTTP client, able of consuming the endpoints served +//! by the `http_api` crate. +//! +//! Eventually it would be ideal to publish this crate on crates.io, however we have some local +//! dependencies preventing this presently. 
+
+#[cfg(feature = "lighthouse")]
+pub mod lighthouse;
+pub mod types;
+
+use self::types::*;
+use reqwest::{IntoUrl, Response};
+use serde::{de::DeserializeOwned, Serialize};
+use std::convert::TryFrom;
+use std::fmt;
+
+pub use reqwest;
+pub use reqwest::{StatusCode, Url};
+
+#[derive(Debug)]
+pub enum Error {
+    /// The `reqwest` client raised an error.
+    Reqwest(reqwest::Error),
+    /// The server returned an error message where the body was able to be parsed.
+    ServerMessage(ErrorMessage),
+    /// The server returned an error message where the body was unable to be parsed.
+    StatusCode(StatusCode),
+    /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`.
+    InvalidUrl(Url),
+}
+
+impl Error {
+    /// If the error has a HTTP status code, return it.
+    pub fn status(&self) -> Option<StatusCode> {
+        match self {
+            Error::Reqwest(error) => error.status(),
+            Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(),
+            Error::StatusCode(status) => Some(*status),
+            Error::InvalidUrl(_) => None,
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+/// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a
+/// Lighthouse Beacon Node HTTP server (`http_api`).
+#[derive(Clone)]
+pub struct BeaconNodeHttpClient {
+    client: reqwest::Client,
+    server: Url,
+}
+
+impl BeaconNodeHttpClient {
+    pub fn new(server: Url) -> Self {
+        Self {
+            client: reqwest::Client::new(),
+            server,
+        }
+    }
+
+    pub fn from_components(server: Url, client: reqwest::Client) -> Self {
+        Self { client, server }
+    }
+
+    /// Return the path with the standard `/eth/v1` prefix applied.
+    fn eth_path(&self) -> Result<Url, Error> {
+        let mut path = self.server.clone();
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("eth")
+            .push("v1");
+
+        Ok(path)
+    }
+
+    /// Perform a HTTP GET request.
+ async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> { + let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + ok_or_error(response) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + + /// Perform a HTTP GET request, returning `None` on a 404 error. + async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> { + let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + match ok_or_error(response).await { + Ok(resp) => resp.json().await.map(Option::Some).map_err(Error::Reqwest), + Err(err) => { + if err.status() == Some(StatusCode::NOT_FOUND) { + Ok(None) + } else { + Err(err) + } + } + } + } + + /// Perform a HTTP POST request. + async fn post<T: Serialize, U: IntoUrl>(&self, url: U, body: &T) -> Result<(), Error> { + let response = self + .client + .post(url) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + ok_or_error(response).await?; + Ok(()) + } + + /// `GET beacon/genesis` + /// + /// ## Errors + /// + /// May return a `404` if beacon chain genesis has not yet occurred. + pub async fn get_beacon_genesis(&self) -> Result<GenericResponse<GenesisData>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("genesis"); + + self.get(path).await + } + + /// `GET beacon/states/{state_id}/root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_root( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<RootData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("root"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/fork` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_beacon_states_fork( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<Fork>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("fork"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/finality_checkpoints` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_finality_checkpoints( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<FinalityCheckpointsData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("finality_checkpoints"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/validators` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_validators( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<Vec<ValidatorData>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/committees?slot,index` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_committees( + &self, + state_id: StateId, + epoch: Epoch, + slot: Option<Slot>, + index: Option<u64>, + ) -> Result<Option<GenericResponse<Vec<CommitteeData>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("committees") + .push(&epoch.to_string()); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(index) = index { + path.query_pairs_mut() + .append_pair("index", &index.to_string()); + } + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/validators/{validator_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_validator_id( + &self, + state_id: StateId, + validator_id: &ValidatorId, + ) -> Result<Option<GenericResponse<ValidatorData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators") + .push(&validator_id.to_string()); + + self.get_opt(path).await + } + + /// `GET beacon/headers?slot,parent_root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_headers( + &self, + slot: Option<Slot>, + parent_root: Option<Hash256>, + ) -> Result<Option<GenericResponse<Vec<BlockHeaderData>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("headers"); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(root) = parent_root { + path.query_pairs_mut() + .append_pair("parent_root", &format!("{:?}", root)); + } + + self.get_opt(path).await + } + + /// `GET beacon/headers/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_headers_block_id( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<BlockHeaderData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("headers") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `POST beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blocks<T: EthSpec>( + &self, + block: &SignedBeaconBlock<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks"); + + self.post(path, block).await?; + + Ok(()) + } + + /// `GET beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks<T: EthSpec>( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<SignedBeaconBlock<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `GET beacon/blocks/{block_id}/root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_root( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<RootData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("root"); + + self.get_opt(path).await + } + + /// `GET beacon/blocks/{block_id}/attestations` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_attestations<T: EthSpec>( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<Vec<Attestation<T>>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("attestations"); + + self.get_opt(path).await + } + + /// `POST beacon/pool/attestations` + pub async fn post_beacon_pool_attestations<T: EthSpec>( + &self, + attestation: &Attestation<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.post(path, attestation).await?; + + Ok(()) + } + + /// `GET beacon/pool/attestations` + pub async fn get_beacon_pool_attestations<T: EthSpec>( + &self, + ) -> Result<GenericResponse<Vec<Attestation<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.get(path).await + } + + /// `POST beacon/pool/attester_slashings` + pub async fn post_beacon_pool_attester_slashings<T: EthSpec>( + &self, + slashing: &AttesterSlashing<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.post(path, slashing).await?; + + Ok(()) + } + + /// `GET beacon/pool/attester_slashings` + pub async fn get_beacon_pool_attester_slashings<T: EthSpec>( + &self, + ) -> Result<GenericResponse<Vec<AttesterSlashing<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.get(path).await + } + + /// `POST beacon/pool/proposer_slashings` + pub async fn post_beacon_pool_proposer_slashings( + &self, + slashing: &ProposerSlashing, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("proposer_slashings"); + + self.post(path, slashing).await?; + + Ok(()) + } + + /// `GET beacon/pool/proposer_slashings` + pub async fn get_beacon_pool_proposer_slashings( + &self, + ) -> Result<GenericResponse<Vec<ProposerSlashing>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("proposer_slashings"); + + self.get(path).await + } + + /// `POST beacon/pool/voluntary_exits` + pub async fn post_beacon_pool_voluntary_exits( + &self, + exit: &SignedVoluntaryExit, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("voluntary_exits"); + + self.post(path, exit).await?; + + Ok(()) + } + + /// `GET beacon/pool/voluntary_exits` + pub async fn get_beacon_pool_voluntary_exits( + &self, + ) -> Result<GenericResponse<Vec<SignedVoluntaryExit>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("voluntary_exits"); + + self.get(path).await + } + + /// `GET config/fork_schedule` + pub async fn get_config_fork_schedule(&self) -> Result<GenericResponse<Vec<Fork>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+            .push("config")
+            .push("fork_schedule");
+
+        self.get(path).await
+    }
+
+    /// `GET config/spec`
+    pub async fn get_config_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("config")
+            .push("spec");
+
+        self.get(path).await
+    }
+
+    /// `GET config/deposit_contract`
+    pub async fn get_config_deposit_contract(
+        &self,
+    ) -> Result<GenericResponse<DepositContractData>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("config")
+            .push("deposit_contract");
+
+        self.get(path).await
+    }
+
+    /// `GET node/version`
+    pub async fn get_node_version(&self) -> Result<GenericResponse<VersionData>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("node")
+            .push("version");
+
+        self.get(path).await
+    }
+
+    /// `GET node/syncing`
+    pub async fn get_node_syncing(&self) -> Result<GenericResponse<SyncingData>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("node")
+            .push("syncing");
+
+        self.get(path).await
+    }
+
+    /// `GET debug/beacon/states/{state_id}`
+    pub async fn get_debug_beacon_states<T: EthSpec>(
+        &self,
+        state_id: StateId,
+    ) -> Result<Option<GenericResponse<BeaconState<T>>>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("debug")
+            .push("beacon")
+            .push("states")
+            .push(&state_id.to_string());
+
+        self.get_opt(path).await
+    }
+
+    /// `GET debug/beacon/heads`
+    pub async fn get_debug_beacon_heads(
+        &self,
+    ) -> Result<GenericResponse<Vec<ChainHeadData>>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("debug")
+            .push("beacon")
+            .push("heads");
+
+        self.get(path).await
+    }
+
+    /// `GET validator/duties/attester/{epoch}?index`
+    ///
+    /// ## Note
+    ///
+    /// The `index` query parameter accepts a list of validator indices.
+    pub async fn get_validator_duties_attester(
+        &self,
+        epoch: Epoch,
+        index: Option<&[u64]>,
+    ) -> Result<GenericResponse<Vec<AttesterData>>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("validator")
+            .push("duties")
+            .push("attester")
+            .push(&epoch.to_string());
+
+        if let Some(index) = index {
+            let string = index
+                .iter()
+                .map(|i| i.to_string())
+                .collect::<Vec<_>>()
+                .join(",");
+            path.query_pairs_mut().append_pair("index", &string);
+        }
+
+        self.get(path).await
+    }
+
+    /// `GET validator/duties/proposer/{epoch}`
+    pub async fn get_validator_duties_proposer(
+        &self,
+        epoch: Epoch,
+    ) -> Result<GenericResponse<Vec<ProposerData>>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("validator")
+            .push("duties")
+            .push("proposer")
+            .push(&epoch.to_string());
+
+        self.get(path).await
+    }
+
+    /// `GET validator/blocks/{slot}?randao_reveal,graffiti`
+    ///
+    /// ## Note
+    ///
+    /// The `randao_reveal` query parameter is mandatory, `graffiti` is optional.
+    pub async fn get_validator_blocks<T: EthSpec>(
+        &self,
+        slot: Slot,
+        randao_reveal: SignatureBytes,
+        graffiti: Option<&Graffiti>,
+    ) -> Result<GenericResponse<BeaconBlock<T>>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("validator")
+            .push("blocks")
+            .push(&slot.to_string());
+
+        path.query_pairs_mut()
+            .append_pair("randao_reveal", &randao_reveal.to_string());
+
+        if let Some(graffiti) = graffiti {
+            path.query_pairs_mut()
+                .append_pair("graffiti", &graffiti.to_string());
+        }
+
+        self.get(path).await
+    }
+
+    /// `GET validator/attestation_data?slot,committee_index`
+    pub async fn get_validator_attestation_data(
+        &self,
+        slot: Slot,
+        committee_index: CommitteeIndex,
+    ) -> Result<GenericResponse<AttestationData>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("validator")
+            .push("attestation_data");
+
+        path.query_pairs_mut()
+            .append_pair("slot", &slot.to_string())
+            .append_pair("committee_index", &committee_index.to_string());
+
+        self.get(path).await
+    }
+
+    /// `GET validator/aggregate_attestation?slot,attestation_data_root`
+    pub async fn get_validator_aggregate_attestation<T: EthSpec>(
+        &self,
+        slot: Slot,
+        attestation_data_root: Hash256,
+    ) -> Result<Option<GenericResponse<Attestation<T>>>, Error> {
+        let mut path = self.eth_path()?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+ .push("validator") + .push("aggregate_attestation"); + + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()) + .append_pair( + "attestation_data_root", + &format!("{:?}", attestation_data_root), + ); + + self.get_opt(path).await + } + + /// `POST validator/aggregate_and_proofs` + pub async fn post_validator_aggregate_and_proof<T: EthSpec>( + &self, + aggregate: &SignedAggregateAndProof<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("aggregate_and_proofs"); + + self.post(path, aggregate).await?; + + Ok(()) + } + + /// `POST validator/beacon_committee_subscriptions` + pub async fn post_validator_beacon_committee_subscriptions( + &self, + subscriptions: &[BeaconCommitteeSubscription], + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("beacon_committee_subscriptions"); + + self.post(path, &subscriptions).await?; + + Ok(()) + } +} + +/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an +/// appropriate error message. +async fn ok_or_error(response: Response) -> Result<Response, Error> { + let status = response.status(); + + if status == StatusCode::OK { + Ok(response) + } else if let Ok(message) = response.json().await { + Err(Error::ServerMessage(message)) + } else { + Err(Error::StatusCode(status)) + } +} diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs new file mode 100644 index 00000000000..8bfbad84ecf --- /dev/null +++ b/common/eth2/src/lighthouse.rs @@ -0,0 +1,224 @@ +//! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
+ +use crate::{ + types::{Epoch, EthSpec, GenericResponse, ValidatorId}, + BeaconNodeHttpClient, Error, +}; +use proto_array::core::ProtoArray; +use serde::{Deserialize, Serialize}; + +pub use eth2_libp2p::{types::SyncState, PeerInfo}; + +/// Information returned by `peers` and `connected_peers`. +// TODO: this should be deserializable.. +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct Peer<T: EthSpec> { + /// The Peer's ID + pub peer_id: String, + /// The PeerInfo associated with the peer. + pub peer_info: PeerInfo<T>, +} + +/// The results of validators voting during an epoch. +/// +/// Provides information about the current and previous epochs. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GlobalValidatorInclusionData { + /// The total effective balance of all active validators during the _current_ epoch. + pub current_epoch_active_gwei: u64, + /// The total effective balance of all active validators during the _previous_ epoch. + pub previous_epoch_active_gwei: u64, + /// The total effective balance of all validators who attested during the _current_ epoch. + pub current_epoch_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _current_ epoch and + /// agreed with the state about the beacon block at the first slot of the _current_ epoch. + pub current_epoch_target_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch. + pub previous_epoch_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. + pub previous_epoch_target_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the time of attestation. 
+ pub previous_epoch_head_attesting_gwei: u64, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorInclusionData { + /// True if the validator has been slashed, ever. + pub is_slashed: bool, + /// True if the validator can withdraw in the current epoch. + pub is_withdrawable_in_current_epoch: bool, + /// True if the validator was active in the state's _current_ epoch. + pub is_active_in_current_epoch: bool, + /// True if the validator was active in the state's _previous_ epoch. + pub is_active_in_previous_epoch: bool, + /// The validator's effective balance in the _current_ epoch. + pub current_epoch_effective_balance_gwei: u64, + /// True if the validator had an attestation included in the _current_ epoch. + pub is_current_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _current_ + /// epoch matches the block root known to the state. + pub is_current_epoch_target_attester: bool, + /// True if the validator had an attestation included in the _previous_ epoch. + pub is_previous_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _previous_ + /// epoch matches the block root known to the state. + pub is_previous_epoch_target_attester: bool, + /// True if the validator's beacon block root attestation in the _previous_ epoch at the + /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. + pub is_previous_epoch_head_attester: bool, +} + +#[cfg(target_os = "linux")] +use {procinfo::pid, psutil::process::Process}; + +/// Reports on the health of the Lighthouse instance. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Health { + /// The pid of this process. + pub pid: u32, + /// The number of threads used by this pid. + pub pid_num_threads: i32, + /// The total resident memory used by this pid. 
+ pub pid_mem_resident_set_size: u64, + /// The total virtual memory used by this pid. + pub pid_mem_virtual_memory_size: u64, + /// Total virtual memory on the system + pub sys_virt_mem_total: u64, + /// Total virtual memory available for new processes. + pub sys_virt_mem_available: u64, + /// Total virtual memory used on the system + pub sys_virt_mem_used: u64, + /// Total virtual memory not used on the system + pub sys_virt_mem_free: u64, + /// Percentage of virtual memory used on the system + pub sys_virt_mem_percent: f32, + /// System load average over 1 minute. + pub sys_loadavg_1: f64, + /// System load average over 5 minutes. + pub sys_loadavg_5: f64, + /// System load average over 15 minutes. + pub sys_loadavg_15: f64, +} + +impl Health { + #[cfg(not(target_os = "linux"))] + pub fn observe() -> Result<Self, String> { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + pub fn observe() -> Result<Self, String> { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; + + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + let loadavg = + psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + Ok(Self { + pid: process.pid(), + pid_num_threads: stat.num_threads, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + }) + } +} + +impl BeaconNodeHttpClient { + /// `GET 
lighthouse/health` + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("health"); + + self.get(path).await + } + + /// `GET lighthouse/syncing` + pub async fn get_lighthouse_syncing(&self) -> Result<GenericResponse<SyncState>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("syncing"); + + self.get(path).await + } + + /* + * Note: + * + * The `lighthouse/peers` endpoints do not have functions here. We are yet to implement + * `Deserialize` on the `PeerInfo` struct since it contains use of `Instant`. This could be + * fairly simply achieved, if desired. + */ + + /// `GET lighthouse/proto_array` + pub async fn get_lighthouse_proto_array(&self) -> Result<GenericResponse<ProtoArray>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("proto_array"); + + self.get(path).await + } + + /// `GET lighthouse/validator_inclusion/{epoch}/global` + pub async fn get_lighthouse_validator_inclusion_global( + &self, + epoch: Epoch, + ) -> Result<GenericResponse<GlobalValidatorInclusionData>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("validator_inclusion") + .push(&epoch.to_string()) + .push("global"); + + self.get(path).await + } + + /// `GET lighthouse/validator_inclusion/{epoch}/{validator_id}` + pub async fn get_lighthouse_validator_inclusion( + &self, + epoch: Epoch, + validator_id: ValidatorId, + ) -> Result<GenericResponse<Option<ValidatorInclusionData>>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validator_inclusion") + .push(&epoch.to_string()) + .push(&validator_id.to_string()); + + self.get(path).await + } +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs new file mode 100644 index 00000000000..c3a8d240c23 --- /dev/null +++ b/common/eth2/src/types.rs @@ -0,0 +1,432 @@ +//! This module exposes a superset of the `types` crate. It adds additional types that are only +//! required for the HTTP API. + +use eth2_libp2p::{Enr, Multiaddr}; +use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; +use std::fmt; +use std::str::FromStr; + +pub use types::*; + +/// An API error serializable to JSON. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ErrorMessage { + pub code: u16, + pub message: String, + #[serde(default)] + pub stacktraces: Vec<String>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GenesisData { + #[serde(with = "serde_utils::quoted_u64")] + pub genesis_time: u64, + pub genesis_validators_root: Hash256, + #[serde(with = "serde_utils::bytes_4_hex")] + pub genesis_fork_version: [u8; 4], +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum BlockId { + Head, + Genesis, + Finalized, + Justified, + Slot(Slot), + Root(Hash256), +} + +impl FromStr for BlockId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "head" => Ok(BlockId::Head), + "genesis" => Ok(BlockId::Genesis), + "finalized" => Ok(BlockId::Finalized), + "justified" => Ok(BlockId::Justified), + other => { + if other.starts_with("0x") { + Hash256::from_str(&s[2..]) + .map(BlockId::Root) + .map_err(|e| format!("{} cannot be parsed as a root", e)) + } else { + u64::from_str(s) + .map(Slot::new) + .map(BlockId::Slot) + .map_err(|_| format!("{} cannot be parsed as a parameter", s)) + } + } + } + } +} + +impl fmt::Display for BlockId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BlockId::Head => write!(f, "head"), + BlockId::Genesis => write!(f, "genesis"), + BlockId::Finalized => write!(f, "finalized"), + BlockId::Justified => write!(f, "justified"), + BlockId::Slot(slot) => write!(f, "{}", slot), + BlockId::Root(root) => write!(f, "{:?}", root), + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum StateId { + Head, + Genesis, + Finalized, + Justified, + Slot(Slot), + Root(Hash256), +} + +impl FromStr for StateId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "head" => Ok(StateId::Head), + "genesis" => Ok(StateId::Genesis), + "finalized" => Ok(StateId::Finalized), + "justified" => Ok(StateId::Justified), + 
other => { + if other.starts_with("0x") { + Hash256::from_str(&s[2..]) + .map(StateId::Root) + .map_err(|e| format!("{} cannot be parsed as a root", e)) + } else { + u64::from_str(s) + .map(Slot::new) + .map(StateId::Slot) + .map_err(|_| format!("{} cannot be parsed as a slot", s)) + } + } + } + } +} + +impl fmt::Display for StateId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + StateId::Head => write!(f, "head"), + StateId::Genesis => write!(f, "genesis"), + StateId::Finalized => write!(f, "finalized"), + StateId::Justified => write!(f, "justified"), + StateId::Slot(slot) => write!(f, "{}", slot), + StateId::Root(root) => write!(f, "{:?}", root), + } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] +pub struct GenericResponse<T: Serialize + serde::de::DeserializeOwned> { + pub data: T, +} + +impl<T: Serialize + serde::de::DeserializeOwned> From<T> for GenericResponse<T> { + fn from(data: T) -> Self { + Self { data } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize)] +#[serde(bound = "T: Serialize")] +pub struct GenericResponseRef<'a, T: Serialize> { + pub data: &'a T, +} + +impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { + fn from(data: &'a T) -> Self { + Self { data } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +pub struct RootData { + pub root: Hash256, +} + +impl From<Hash256> for RootData { + fn from(root: Hash256) -> Self { + Self { root } + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct FinalityCheckpointsData { + pub previous_justified: Checkpoint, + pub current_justified: Checkpoint, + pub finalized: Checkpoint, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum ValidatorId { + PublicKey(PublicKeyBytes), + Index(u64), +} + +impl FromStr for ValidatorId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.starts_with("0x") { + 
PublicKeyBytes::from_str(s) + .map(ValidatorId::PublicKey) + .map_err(|e| format!("{} cannot be parsed as a public key: {}", s, e)) + } else { + u64::from_str(s) + .map(ValidatorId::Index) + .map_err(|e| format!("{} cannot be parsed as a slot: {}", s, e)) + } + } +} + +impl fmt::Display for ValidatorId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ValidatorId::PublicKey(pubkey) => write!(f, "{:?}", pubkey), + ValidatorId::Index(index) => write!(f, "{}", index), + } + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub balance: u64, + pub status: ValidatorStatus, + pub validator: Validator, +} + +// TODO: This does not currently match the spec, but I'm going to try and change the spec using +// this proposal: +// +// https://hackmd.io/bQxMDRt1RbS1TLno8K4NPg?view +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +pub enum ValidatorStatus { + Unknown, + WaitingForEligibility, + WaitingForFinality, + WaitingInQueue, + StandbyForActive(Epoch), + Active, + ActiveAwaitingVoluntaryExit(Epoch), + ActiveAwaitingSlashedExit(Epoch), + ExitedVoluntarily(Epoch), + ExitedSlashed(Epoch), + Withdrawable, + Withdrawn, +} + +impl ValidatorStatus { + pub fn from_validator( + validator_opt: Option<&Validator>, + epoch: Epoch, + finalized_epoch: Epoch, + far_future_epoch: Epoch, + ) -> Self { + if let Some(validator) = validator_opt { + if validator.is_withdrawable_at(epoch) { + ValidatorStatus::Withdrawable + } else if validator.is_exited_at(epoch) { + if validator.slashed { + ValidatorStatus::ExitedSlashed(validator.withdrawable_epoch) + } else { + ValidatorStatus::ExitedVoluntarily(validator.withdrawable_epoch) + } + } else if validator.is_active_at(epoch) { + if validator.exit_epoch < far_future_epoch { + if validator.slashed { + 
ValidatorStatus::ActiveAwaitingSlashedExit(validator.exit_epoch) + } else { + ValidatorStatus::ActiveAwaitingVoluntaryExit(validator.exit_epoch) + } + } else { + ValidatorStatus::Active + } + } else if validator.activation_epoch < far_future_epoch { + ValidatorStatus::StandbyForActive(validator.activation_epoch) + } else if validator.activation_eligibility_epoch < far_future_epoch { + if finalized_epoch < validator.activation_eligibility_epoch { + ValidatorStatus::WaitingForFinality + } else { + ValidatorStatus::WaitingInQueue + } + } else { + ValidatorStatus::WaitingForEligibility + } + } else { + ValidatorStatus::Unknown + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct CommitteesQuery { + pub slot: Option<Slot>, + pub index: Option<u64>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CommitteeData { + #[serde(with = "serde_utils::quoted_u64")] + pub index: u64, + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64_vec")] + pub validators: Vec<u64>, +} + +#[derive(Serialize, Deserialize)] +pub struct HeadersQuery { + pub slot: Option<Slot>, + pub parent_root: Option<Hash256>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockHeaderAndSignature { + pub message: BeaconBlockHeader, + pub signature: SignatureBytes, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockHeaderData { + pub root: Hash256, + pub canonical: bool, + pub header: BlockHeaderAndSignature, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct DepositContractData { + #[serde(with = "serde_utils::quoted_u64")] + pub chain_id: u64, + pub address: Address, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ChainHeadData { + pub slot: Slot, + pub root: Hash256, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IdentityData { + pub peer_id: String, + pub enr: Enr, + pub p2p_addresses: Vec<Multiaddr>, + // TODO: missing 
the following fields: + // + // - discovery_addresses + // - metadata + // + // Tracked here: https://github.com/sigp/lighthouse/issues/1434 +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VersionData { + pub version: String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SyncingData { + pub is_syncing: bool, + pub head_slot: Slot, + pub sync_distance: Slot, +} + +#[derive(Clone, PartialEq, Debug, Deserialize)] +#[serde(try_from = "String", bound = "T: FromStr")] +pub struct QueryVec<T: FromStr>(pub Vec<T>); + +impl<T: FromStr> TryFrom<String> for QueryVec<T> { + type Error = String; + + fn try_from(string: String) -> Result<Self, Self::Error> { + if string == "" { + return Ok(Self(vec![])); + } + + string + .split(',') + .map(|s| s.parse().map_err(|_| "unable to parse".to_string())) + .collect::<Result<Vec<T>, String>>() + .map(Self) + } +} + +#[derive(Clone, Deserialize)] +pub struct ValidatorDutiesQuery { + pub index: Option<QueryVec<u64>>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct AttesterData { + pub pubkey: PublicKeyBytes, + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub committee_index: CommitteeIndex, + #[serde(with = "serde_utils::quoted_u64")] + pub committee_length: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub validator_committee_index: u64, + pub slot: Slot, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ProposerData { + pub pubkey: PublicKeyBytes, + pub slot: Slot, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorBlocksQuery { + pub randao_reveal: SignatureBytes, + pub graffiti: Option<Graffiti>, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorAttestationDataQuery { + pub slot: Slot, + pub committee_index: CommitteeIndex, +} + 
+#[derive(Clone, Serialize, Deserialize)] +pub struct ValidatorAggregateAttestationQuery { + pub attestation_data_root: Hash256, + pub slot: Slot, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BeaconCommitteeSubscription { + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub committee_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, + pub slot: Slot, + pub is_aggregator: bool, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn query_vec() { + assert_eq!( + QueryVec::try_from("0,1,2".to_string()).unwrap(), + QueryVec(vec![0_u64, 1, 2]) + ); + } +} diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 0a4251e06df..0637b973c74 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -55,6 +55,7 @@ //! ``` use prometheus::{HistogramOpts, HistogramTimer, Opts}; +use std::time::Duration; pub use prometheus::{ Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, @@ -221,6 +222,19 @@ pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> { } } +/// Starts a timer on `vec` with the given `name`. +pub fn observe_timer_vec(vec: &Result<HistogramVec>, name: &[&str], duration: Duration) { + // This conversion was taken from here: + // + // https://docs.rs/prometheus/0.5.0/src/prometheus/histogram.rs.html#550-555 + let nanos = f64::from(duration.subsec_nanos()) / 1e9; + let secs = duration.as_secs() as f64 + nanos; + + if let Some(h) = get_histogram(vec, name) { + h.observe(secs) + } +} + /// Stops a timer created with `start_timer(..)`. 
pub fn stop_timer(timer: Option<HistogramTimer>) { if let Some(t) = timer { diff --git a/common/remote_beacon_node/Cargo.toml b/common/remote_beacon_node/Cargo.toml deleted file mode 100644 index 38ee8c7ca58..00000000000 --- a/common/remote_beacon_node/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "remote_beacon_node" -version = "0.2.0" -authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2018" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -reqwest = { version = "0.10.4", features = ["json", "native-tls-vendored"] } -url = "2.1.1" -serde = "1.0.110" -futures = "0.3.5" -types = { path = "../../consensus/types" } -rest_types = { path = "../rest_types" } -hex = "0.4.2" -eth2_ssz = "0.1.2" -serde_json = "1.0.52" -eth2_config = { path = "../eth2_config" } -proto_array = { path = "../../consensus/proto_array" } -operation_pool = { path = "../../beacon_node/operation_pool" } diff --git a/common/remote_beacon_node/src/lib.rs b/common/remote_beacon_node/src/lib.rs deleted file mode 100644 index 199efefd9da..00000000000 --- a/common/remote_beacon_node/src/lib.rs +++ /dev/null @@ -1,732 +0,0 @@ -//! Provides a `RemoteBeaconNode` which interacts with a HTTP API on another Lighthouse (or -//! compatible) instance. -//! -//! Presently, this is only used for testing but it _could_ become a user-facing library. 
- -use eth2_config::Eth2Config; -use reqwest::{Client, ClientBuilder, Response, StatusCode}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use ssz::Encode; -use std::marker::PhantomData; -use std::time::Duration; -use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex, - Epoch, EthSpec, Fork, Graffiti, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, - Signature, SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId, -}; -use url::Url; - -pub use operation_pool::PersistedOperationPool; -pub use proto_array::core::ProtoArray; -pub use rest_types::{ - CanonicalHeadResponse, Committee, HeadBeaconBlock, Health, IndividualVotesRequest, - IndividualVotesResponse, SyncingResponse, ValidatorDutiesRequest, ValidatorDutyBytes, - ValidatorRequest, ValidatorResponse, ValidatorSubscription, -}; - -// Setting a long timeout for debug ensures that crypto-heavy operations can still succeed. -#[cfg(debug_assertions)] -pub const REQUEST_TIMEOUT_SECONDS: u64 = 15; - -#[cfg(not(debug_assertions))] -pub const REQUEST_TIMEOUT_SECONDS: u64 = 5; - -#[derive(Clone)] -/// Connects to a remote Lighthouse (or compatible) node via HTTP. -pub struct RemoteBeaconNode<E: EthSpec> { - pub http: HttpClient<E>, -} - -impl<E: EthSpec> RemoteBeaconNode<E> { - /// Uses the default HTTP timeout. - pub fn new(http_endpoint: String) -> Result<Self, String> { - Self::new_with_timeout(http_endpoint, Duration::from_secs(REQUEST_TIMEOUT_SECONDS)) - } - - pub fn new_with_timeout(http_endpoint: String, timeout: Duration) -> Result<Self, String> { - Ok(Self { - http: HttpClient::new(http_endpoint, timeout) - .map_err(|e| format!("Unable to create http client: {:?}", e))?, - }) - } -} - -#[derive(Debug)] -pub enum Error { - /// Unable to parse a URL. Check the server URL. - UrlParseError(url::ParseError), - /// The `reqwest` library returned an error. 
- ReqwestError(reqwest::Error), - /// There was an error when encoding/decoding an object using serde. - SerdeJsonError(serde_json::Error), - /// The server responded to the request, however it did not return a 200-type success code. - DidNotSucceed { status: StatusCode, body: String }, - /// The request input was invalid. - InvalidInput, -} - -#[derive(Clone)] -pub struct HttpClient<E> { - client: Client, - url: Url, - timeout: Duration, - _phantom: PhantomData<E>, -} - -impl<E: EthSpec> HttpClient<E> { - /// Creates a new instance (without connecting to the node). - pub fn new(server_url: String, timeout: Duration) -> Result<Self, Error> { - Ok(Self { - client: ClientBuilder::new() - .timeout(timeout) - .build() - .expect("should build from static configuration"), - url: Url::parse(&server_url)?, - timeout: Duration::from_secs(15), - _phantom: PhantomData, - }) - } - - pub fn beacon(&self) -> Beacon<E> { - Beacon(self.clone()) - } - - pub fn validator(&self) -> Validator<E> { - Validator(self.clone()) - } - - pub fn spec(&self) -> Spec<E> { - Spec(self.clone()) - } - - pub fn node(&self) -> Node<E> { - Node(self.clone()) - } - - pub fn advanced(&self) -> Advanced<E> { - Advanced(self.clone()) - } - - pub fn consensus(&self) -> Consensus<E> { - Consensus(self.clone()) - } - - fn url(&self, path: &str) -> Result<Url, Error> { - self.url.join(path).map_err(|e| e.into()) - } - - pub async fn json_post<T: Serialize>(&self, url: Url, body: T) -> Result<Response, Error> { - self.client - .post(&url.to_string()) - .json(&body) - .send() - .await - .map_err(Error::from) - } - - pub async fn json_get<T: DeserializeOwned>( - &self, - mut url: Url, - query_pairs: Vec<(String, String)>, - ) -> Result<T, Error> { - query_pairs.into_iter().for_each(|(key, param)| { - url.query_pairs_mut().append_pair(&key, ¶m); - }); - - let response = self - .client - .get(&url.to_string()) - .send() - .await - .map_err(Error::from)?; - - let success = 
error_for_status(response).await.map_err(Error::from)?; - success.json::<T>().await.map_err(Error::from) - } -} - -/// Returns an `Error` (with a description) if the `response` was not a 200-type success response. -/// -/// Distinct from `Response::error_for_status` because it includes the body of the response as -/// text. This ensures the error message from the server is not discarded. -async fn error_for_status(response: Response) -> Result<Response, Error> { - let status = response.status(); - - if status.is_success() { - Ok(response) - } else { - let text_result = response.text().await; - match text_result { - Err(e) => Err(Error::ReqwestError(e)), - Ok(body) => Err(Error::DidNotSucceed { status, body }), - } - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum PublishStatus { - /// The object was valid and has been published to the network. - Valid, - /// The object was not valid and may or may not have been published to the network. - Invalid(String), - /// The server responded with an unknown status code. The object may or may not have been - /// published to the network. - Unknown, -} - -impl PublishStatus { - /// Returns `true` if `*self == PublishStatus::Valid`. - pub fn is_valid(&self) -> bool { - *self == PublishStatus::Valid - } -} - -/// Provides the functions on the `/validator` endpoint of the node. -#[derive(Clone)] -pub struct Validator<E>(HttpClient<E>); - -impl<E: EthSpec> Validator<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("validator/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Produces an unsigned attestation. 
- pub async fn produce_attestation( - &self, - slot: Slot, - committee_index: CommitteeIndex, - ) -> Result<Attestation<E>, Error> { - let query_params = vec![ - ("slot".into(), format!("{}", slot)), - ("committee_index".into(), format!("{}", committee_index)), - ]; - - let client = self.0.clone(); - let url = self.url("attestation")?; - client.json_get(url, query_params).await - } - - /// Produces an aggregate attestation. - pub async fn produce_aggregate_attestation( - &self, - attestation_data: &AttestationData, - ) -> Result<Attestation<E>, Error> { - let query_params = vec![( - "attestation_data".into(), - as_ssz_hex_string(attestation_data), - )]; - - let client = self.0.clone(); - let url = self.url("aggregate_attestation")?; - client.json_get(url, query_params).await - } - - /// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_attestations( - &self, - attestation: Vec<(Attestation<E>, SubnetId)>, - ) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("attestations")?; - let response = client.json_post::<_>(url, attestation).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify it and publish it to the network. 
- pub async fn publish_aggregate_and_proof( - &self, - signed_aggregate_and_proofs: Vec<SignedAggregateAndProof<E>>, - ) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("aggregate_and_proofs")?; - let response = client - .json_post::<_>(url, signed_aggregate_and_proofs) - .await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Returns the duties required of the given validator pubkeys in the given epoch. - pub async fn get_duties( - &self, - epoch: Epoch, - validator_pubkeys: &[PublicKey], - ) -> Result<Vec<ValidatorDutyBytes>, Error> { - let client = self.0.clone(); - - let bulk_request = ValidatorDutiesRequest { - epoch, - pubkeys: validator_pubkeys - .iter() - .map(|pubkey| pubkey.clone().into()) - .collect(), - }; - - let url = self.url("duties")?; - let response = client.json_post::<_>(url, bulk_request).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Posts a block to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_block(&self, block: SignedBeaconBlock<E>) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("block")?; - let response = client.json_post::<_>(url, block).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Requests a new (unsigned) block from the beacon node. 
- pub async fn produce_block( - &self, - slot: Slot, - randao_reveal: Signature, - graffiti: Option<Graffiti>, - ) -> Result<BeaconBlock<E>, Error> { - let client = self.0.clone(); - let url = self.url("block")?; - - let mut query_pairs = vec![ - ("slot".into(), format!("{}", slot.as_u64())), - ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), - ]; - - if let Some(graffiti_bytes) = graffiti { - query_pairs.push(("graffiti".into(), as_ssz_hex_string(&graffiti_bytes))); - } - - client.json_get::<BeaconBlock<E>>(url, query_pairs).await - } - - /// Subscribes a list of validators to particular slots for attestation production/publication. - pub async fn subscribe( - &self, - subscriptions: Vec<ValidatorSubscription>, - ) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("subscribe")?; - let response = client.json_post::<_>(url, subscriptions).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } -} - -/// Provides the functions on the `/beacon` endpoint of the node. -#[derive(Clone)] -pub struct Beacon<E>(HttpClient<E>); - -impl<E: EthSpec> Beacon<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("beacon/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Returns the genesis time. - pub async fn get_genesis_time(&self) -> Result<u64, Error> { - let client = self.0.clone(); - let url = self.url("genesis_time")?; - client.json_get(url, vec![]).await - } - - /// Returns the genesis validators root. 
- pub async fn get_genesis_validators_root(&self) -> Result<Hash256, Error> { - let client = self.0.clone(); - let url = self.url("genesis_validators_root")?; - client.json_get(url, vec![]).await - } - - /// Returns the fork at the head of the beacon chain. - pub async fn get_fork(&self) -> Result<Fork, Error> { - let client = self.0.clone(); - let url = self.url("fork")?; - client.json_get(url, vec![]).await - } - - /// Returns info about the head of the canonical beacon chain. - pub async fn get_head(&self) -> Result<CanonicalHeadResponse, Error> { - let client = self.0.clone(); - let url = self.url("head")?; - client.json_get::<CanonicalHeadResponse>(url, vec![]).await - } - - /// Returns the set of known beacon chain head blocks. One of these will be the canonical head. - pub async fn get_heads(&self) -> Result<Vec<HeadBeaconBlock>, Error> { - let client = self.0.clone(); - let url = self.url("heads")?; - client.json_get(url, vec![]).await - } - - /// Returns the block and block root at the given slot. - pub async fn get_block_by_slot( - &self, - slot: Slot, - ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> { - self.get_block("slot".to_string(), format!("{}", slot.as_u64())) - .await - } - - /// Returns the block and block root at the given root. - pub async fn get_block_by_root( - &self, - root: Hash256, - ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> { - self.get_block("root".to_string(), root_as_string(root)) - .await - } - - /// Returns the block and block root at the given slot. - async fn get_block( - &self, - query_key: String, - query_param: String, - ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> { - let client = self.0.clone(); - let url = self.url("block")?; - client - .json_get::<BlockResponse<E>>(url, vec![(query_key, query_param)]) - .await - .map(|response| (response.beacon_block, response.root)) - } - - /// Returns the state and state root at the given slot. 
- pub async fn get_state_by_slot(&self, slot: Slot) -> Result<(BeaconState<E>, Hash256), Error> { - self.get_state("slot".to_string(), format!("{}", slot.as_u64())) - .await - } - - /// Returns the state and state root at the given root. - pub async fn get_state_by_root( - &self, - root: Hash256, - ) -> Result<(BeaconState<E>, Hash256), Error> { - self.get_state("root".to_string(), root_as_string(root)) - .await - } - - /// Returns the root of the state at the given slot. - pub async fn get_state_root(&self, slot: Slot) -> Result<Hash256, Error> { - let client = self.0.clone(); - let url = self.url("state_root")?; - client - .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - .await - } - - /// Returns the root of the block at the given slot. - pub async fn get_block_root(&self, slot: Slot) -> Result<Hash256, Error> { - let client = self.0.clone(); - let url = self.url("block_root")?; - client - .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - .await - } - - /// Returns the state and state root at the given slot. - async fn get_state( - &self, - query_key: String, - query_param: String, - ) -> Result<(BeaconState<E>, Hash256), Error> { - let client = self.0.clone(); - let url = self.url("state")?; - client - .json_get::<StateResponse<E>>(url, vec![(query_key, query_param)]) - .await - .map(|response| (response.beacon_state, response.root)) - } - - /// Returns the block and block root at the given slot. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. 
- pub async fn get_validators( - &self, - validator_pubkeys: Vec<PublicKey>, - state_root: Option<Hash256>, - ) -> Result<Vec<ValidatorResponse>, Error> { - let client = self.0.clone(); - - let bulk_request = ValidatorRequest { - state_root, - pubkeys: validator_pubkeys - .iter() - .map(|pubkey| pubkey.clone().into()) - .collect(), - }; - - let url = self.url("validators")?; - let response = client.json_post::<_>(url, bulk_request).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Returns all validators. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. - pub async fn get_all_validators( - &self, - state_root: Option<Hash256>, - ) -> Result<Vec<ValidatorResponse>, Error> { - let client = self.0.clone(); - - let query_params = if let Some(state_root) = state_root { - vec![("state_root".into(), root_as_string(state_root))] - } else { - vec![] - }; - - let url = self.url("validators/all")?; - client.json_get(url, query_params).await - } - - /// Returns the active validators. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. - pub async fn get_active_validators( - &self, - state_root: Option<Hash256>, - ) -> Result<Vec<ValidatorResponse>, Error> { - let client = self.0.clone(); - - let query_params = if let Some(state_root) = state_root { - vec![("state_root".into(), root_as_string(state_root))] - } else { - vec![] - }; - - let url = self.url("validators/active")?; - client.json_get(url, query_params).await - } - - /// Returns committees at the given epoch. 
- pub async fn get_committees(&self, epoch: Epoch) -> Result<Vec<Committee>, Error> { - let client = self.0.clone(); - - let url = self.url("committees")?; - client - .json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))]) - .await - } - - pub async fn proposer_slashing( - &self, - proposer_slashing: ProposerSlashing, - ) -> Result<bool, Error> { - let client = self.0.clone(); - - let url = self.url("proposer_slashing")?; - let response = client.json_post::<_>(url, proposer_slashing).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - pub async fn attester_slashing( - &self, - attester_slashing: AttesterSlashing<E>, - ) -> Result<bool, Error> { - let client = self.0.clone(); - - let url = self.url("attester_slashing")?; - let response = client.json_post::<_>(url, attester_slashing).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } -} - -/// Provides the functions on the `/spec` endpoint of the node. -#[derive(Clone)] -pub struct Spec<E>(HttpClient<E>); - -impl<E: EthSpec> Spec<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("spec/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - pub async fn get_eth2_config(&self) -> Result<Eth2Config, Error> { - let client = self.0.clone(); - let url = self.url("eth2_config")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/node` endpoint of the node. 
-#[derive(Clone)] -pub struct Node<E>(HttpClient<E>); - -impl<E: EthSpec> Node<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("node/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - pub async fn get_version(&self) -> Result<String, Error> { - let client = self.0.clone(); - let url = self.url("version")?; - client.json_get(url, vec![]).await - } - - pub async fn get_health(&self) -> Result<Health, Error> { - let client = self.0.clone(); - let url = self.url("health")?; - client.json_get(url, vec![]).await - } - - pub async fn syncing_status(&self) -> Result<SyncingResponse, Error> { - let client = self.0.clone(); - let url = self.url("syncing")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/advanced` endpoint of the node. -#[derive(Clone)] -pub struct Advanced<E>(HttpClient<E>); - -impl<E: EthSpec> Advanced<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("advanced/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Gets the core `ProtoArray` struct from the node. - pub async fn get_fork_choice(&self) -> Result<ProtoArray, Error> { - let client = self.0.clone(); - let url = self.url("fork_choice")?; - client.json_get(url, vec![]).await - } - - /// Gets the core `PersistedOperationPool` struct from the node. - pub async fn get_operation_pool(&self) -> Result<PersistedOperationPool<E>, Error> { - let client = self.0.clone(); - let url = self.url("operation_pool")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/consensus` endpoint of the node. 
-#[derive(Clone)] -pub struct Consensus<E>(HttpClient<E>); - -impl<E: EthSpec> Consensus<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("consensus/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Gets a `IndividualVote` for each of the given `pubkeys`. - pub async fn get_individual_votes( - &self, - epoch: Epoch, - pubkeys: Vec<PublicKeyBytes>, - ) -> Result<IndividualVotesResponse, Error> { - let client = self.0.clone(); - let req_body = IndividualVotesRequest { epoch, pubkeys }; - - let url = self.url("individual_votes")?; - let response = client.json_post::<_>(url, req_body).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Gets a `VoteCount` for the given `epoch`. - pub async fn get_vote_count(&self, epoch: Epoch) -> Result<IndividualVotesResponse, Error> { - let client = self.0.clone(); - let query_params = vec![("epoch".into(), format!("{}", epoch.as_u64()))]; - let url = self.url("vote_count")?; - client.json_get(url, query_params).await - } -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse<T: EthSpec> { - pub beacon_block: SignedBeaconBlock<T>, - pub root: Hash256, -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse<T: EthSpec> { - pub beacon_state: BeaconState<T>, - pub root: Hash256, -} - -fn root_as_string(root: Hash256) -> String { - format!("0x{:?}", root) -} - -fn as_ssz_hex_string<T: Encode>(item: &T) -> String { - format!("0x{}", hex::encode(item.as_ssz_bytes())) -} - -impl From<reqwest::Error> for Error { - fn from(e: reqwest::Error) -> Error { - Error::ReqwestError(e) - } -} - -impl From<url::ParseError> for Error { - fn from(e: url::ParseError) -> Error { - Error::UrlParseError(e) - } -} - -impl From<serde_json::Error> for Error { - fn from(e: serde_json::Error) -> Error { - Error::SerdeJsonError(e) - } -} diff --git 
a/common/rest_types/Cargo.toml b/common/rest_types/Cargo.toml deleted file mode 100644 index d9e021fe19d..00000000000 --- a/common/rest_types/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "rest_types" -version = "0.2.0" -authors = ["Sigma Prime <contact@sigmaprime.io>"] -edition = "2018" - -[dependencies] -types = { path = "../../consensus/types" } -eth2_ssz_derive = "0.1.0" -eth2_ssz = "0.1.2" -eth2_hashing = "0.1.0" -tree_hash = "0.1.0" -state_processing = { path = "../../consensus/state_processing" } -bls = { path = "../../crypto/bls" } -serde = { version = "1.0.110", features = ["derive"] } -rayon = "1.3.0" -hyper = "0.13.5" -tokio = { version = "0.2.21", features = ["sync"] } -environment = { path = "../../lighthouse/environment" } -store = { path = "../../beacon_node/store" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -serde_json = "1.0.52" -serde_yaml = "0.8.11" - -[target.'cfg(target_os = "linux")'.dependencies] -psutil = "3.1.0" -procinfo = "0.4.2" diff --git a/common/rest_types/src/api_error.rs b/common/rest_types/src/api_error.rs deleted file mode 100644 index 1eac8d4a468..00000000000 --- a/common/rest_types/src/api_error.rs +++ /dev/null @@ -1,99 +0,0 @@ -use hyper::{Body, Response, StatusCode}; -use std::error::Error as StdError; - -#[derive(PartialEq, Debug, Clone)] -pub enum ApiError { - MethodNotAllowed(String), - ServerError(String), - NotImplemented(String), - BadRequest(String), - NotFound(String), - UnsupportedType(String), - ImATeapot(String), // Just in case. - ProcessingError(String), // A 202 error, for when a block/attestation cannot be processed, but still transmitted. 
- InvalidHeaderValue(String), -} - -pub type ApiResult = Result<Response<Body>, ApiError>; - -impl ApiError { - pub fn status_code(self) -> (StatusCode, String) { - match self { - ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), - ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), - ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc), - ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), - ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), - ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), - ApiError::ProcessingError(desc) => (StatusCode::ACCEPTED, desc), - ApiError::InvalidHeaderValue(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - } - } -} - -impl Into<Response<Body>> for ApiError { - fn into(self) -> Response<Body> { - let (status_code, desc) = self.status_code(); - Response::builder() - .status(status_code) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(desc)) - .expect("Response should always be created.") - } -} - -impl From<store::Error> for ApiError { - fn from(e: store::Error) -> ApiError { - ApiError::ServerError(format!("Database error: {:?}", e)) - } -} - -impl From<types::BeaconStateError> for ApiError { - fn from(e: types::BeaconStateError) -> ApiError { - ApiError::ServerError(format!("BeaconState error: {:?}", e)) - } -} - -impl From<beacon_chain::BeaconChainError> for ApiError { - fn from(e: beacon_chain::BeaconChainError) -> ApiError { - ApiError::ServerError(format!("BeaconChainError error: {:?}", e)) - } -} - -impl From<state_processing::per_slot_processing::Error> for ApiError { - fn from(e: state_processing::per_slot_processing::Error) -> ApiError { - ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) - } -} - -impl From<hyper::error::Error> for ApiError { - fn from(e: hyper::error::Error) -> ApiError { - 
ApiError::ServerError(format!("Networking error: {:?}", e)) - } -} - -impl From<std::io::Error> for ApiError { - fn from(e: std::io::Error) -> ApiError { - ApiError::ServerError(format!("IO error: {:?}", e)) - } -} - -impl From<hyper::header::InvalidHeaderValue> for ApiError { - fn from(e: hyper::header::InvalidHeaderValue) -> ApiError { - ApiError::InvalidHeaderValue(format!("Invalid CORS header value: {:?}", e)) - } -} - -impl StdError for ApiError { - fn cause(&self) -> Option<&dyn StdError> { - None - } -} - -impl std::fmt::Display for ApiError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let status = self.clone().status_code(); - write!(f, "{:?}: {:?}", status.0, status.1) - } -} diff --git a/common/rest_types/src/beacon.rs b/common/rest_types/src/beacon.rs deleted file mode 100644 index 0a141ea282a..00000000000 --- a/common/rest_types/src/beacon.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! A collection of REST API types for interaction with the beacon node. - -use bls::PublicKeyBytes; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::beacon_state::EthSpec; -use types::{BeaconState, CommitteeIndex, Hash256, SignedBeaconBlock, Slot, Validator}; - -/// Information about a block that is at the head of a chain. May or may not represent the -/// canonical head. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct HeadBeaconBlock { - pub beacon_block_root: Hash256, - pub beacon_block_slot: Slot, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse<T: EthSpec> { - pub root: Hash256, - pub beacon_block: SignedBeaconBlock<T>, -} - -/// Information about the block and state that are at head of the beacon chain. 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct CanonicalHeadResponse { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub finalized_slot: Slot, - pub finalized_block_root: Hash256, - pub justified_slot: Slot, - pub justified_block_root: Hash256, - pub previous_justified_slot: Slot, - pub previous_justified_block_root: Hash256, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorResponse { - pub pubkey: PublicKeyBytes, - pub validator_index: Option<usize>, - pub balance: Option<u64>, - pub validator: Option<Validator>, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorRequest { - /// If set to `None`, uses the canonical head state. - pub state_root: Option<Hash256>, - pub pubkeys: Vec<PublicKeyBytes>, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct Committee { - pub slot: Slot, - pub index: CommitteeIndex, - pub committee: Vec<usize>, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse<T: EthSpec> { - pub root: Hash256, - pub beacon_state: BeaconState<T>, -} diff --git a/common/rest_types/src/consensus.rs b/common/rest_types/src/consensus.rs deleted file mode 100644 index 519b1ae247c..00000000000 --- a/common/rest_types/src/consensus.rs +++ /dev/null @@ -1,66 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::ValidatorStatus; -use types::{Epoch, PublicKeyBytes}; - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesRequest { - pub epoch: Epoch, - pub pubkeys: Vec<PublicKeyBytes>, -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVote { - /// True if the validator has been slashed, ever. 
- pub is_slashed: bool, - /// True if the validator can withdraw in the current epoch. - pub is_withdrawable_in_current_epoch: bool, - /// True if the validator was active in the state's _current_ epoch. - pub is_active_in_current_epoch: bool, - /// True if the validator was active in the state's _previous_ epoch. - pub is_active_in_previous_epoch: bool, - /// The validator's effective balance in the _current_ epoch. - pub current_epoch_effective_balance_gwei: u64, - /// True if the validator had an attestation included in the _current_ epoch. - pub is_current_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _current_ - /// epoch matches the block root known to the state. - pub is_current_epoch_target_attester: bool, - /// True if the validator had an attestation included in the _previous_ epoch. - pub is_previous_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _previous_ - /// epoch matches the block root known to the state. - pub is_previous_epoch_target_attester: bool, - /// True if the validator's beacon block root attestation in the _previous_ epoch at the - /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. 
- pub is_previous_epoch_head_attester: bool, -} - -impl Into<IndividualVote> for ValidatorStatus { - fn into(self) -> IndividualVote { - IndividualVote { - is_slashed: self.is_slashed, - is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch, - is_active_in_current_epoch: self.is_active_in_current_epoch, - is_active_in_previous_epoch: self.is_active_in_previous_epoch, - current_epoch_effective_balance_gwei: self.current_epoch_effective_balance, - is_current_epoch_attester: self.is_current_epoch_attester, - is_current_epoch_target_attester: self.is_current_epoch_target_attester, - is_previous_epoch_attester: self.is_previous_epoch_attester, - is_previous_epoch_target_attester: self.is_previous_epoch_target_attester, - is_previous_epoch_head_attester: self.is_previous_epoch_head_attester, - } - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesResponse { - /// The epoch which is considered the "current" epoch. - pub epoch: Epoch, - /// The validators public key. - pub pubkey: PublicKeyBytes, - /// The index of the validator in state.validators. - pub validator_index: Option<usize>, - /// Voting statistics for the validator, if they voted in the given epoch. - pub vote: Option<IndividualVote>, -} diff --git a/common/rest_types/src/handler.rs b/common/rest_types/src/handler.rs deleted file mode 100644 index cbbcd73b19a..00000000000 --- a/common/rest_types/src/handler.rs +++ /dev/null @@ -1,247 +0,0 @@ -use crate::{ApiError, ApiResult}; -use environment::TaskExecutor; -use hyper::header; -use hyper::{Body, Request, Response, StatusCode}; -use serde::Deserialize; -use serde::Serialize; -use ssz::Encode; - -/// Defines the encoding for the API. 
-#[derive(Clone, Serialize, Deserialize, Copy)] -pub enum ApiEncodingFormat { - JSON, - YAML, - SSZ, -} - -impl ApiEncodingFormat { - pub fn get_content_type(&self) -> &str { - match self { - ApiEncodingFormat::JSON => "application/json", - ApiEncodingFormat::YAML => "application/yaml", - ApiEncodingFormat::SSZ => "application/ssz", - } - } -} - -impl From<&str> for ApiEncodingFormat { - fn from(f: &str) -> ApiEncodingFormat { - match f { - "application/yaml" => ApiEncodingFormat::YAML, - "application/ssz" => ApiEncodingFormat::SSZ, - _ => ApiEncodingFormat::JSON, - } - } -} - -/// Provides a HTTP request handler with Lighthouse-specific functionality. -pub struct Handler<T> { - executor: TaskExecutor, - req: Request<()>, - body: Body, - ctx: T, - encoding: ApiEncodingFormat, - allow_body: bool, -} - -impl<T: Clone + Send + Sync + 'static> Handler<T> { - /// Start handling a new request. - pub fn new(req: Request<Body>, ctx: T, executor: TaskExecutor) -> Result<Self, ApiError> { - let (req_parts, body) = req.into_parts(); - let req = Request::from_parts(req_parts, ()); - - let accept_header: String = req - .headers() - .get(header::ACCEPT) - .map_or(Ok(""), |h| h.to_str()) - .map_err(|e| { - ApiError::BadRequest(format!( - "The Accept header contains invalid characters: {:?}", - e - )) - }) - .map(String::from)?; - - Ok(Self { - executor, - req, - body, - ctx, - allow_body: false, - encoding: ApiEncodingFormat::from(accept_header.as_str()), - }) - } - - /// The default behaviour is to return an error if any body is supplied in the request. Calling - /// this function disables that error. - pub fn allow_body(mut self) -> Self { - self.allow_body = true; - self - } - - /// Return a simple static value. - /// - /// Does not use the blocking executor. - pub async fn static_value<V>(self, value: V) -> Result<HandledRequest<V>, ApiError> { - // Always check and disallow a body for a static value. 
- let _ = Self::get_body(self.body, false).await?; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Calls `func` in-line, on the core executor. - /// - /// This should only be used for very fast tasks. - pub async fn in_core_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError> - where - V: Send + Sync + 'static, - F: Fn(Request<Vec<u8>>, T) -> Result<V, ApiError> + Send + Sync + 'static, - { - let body = Self::get_body(self.body, self.allow_body).await?; - let (req_parts, _) = self.req.into_parts(); - let req = Request::from_parts(req_parts, body); - - let value = func(req, self.ctx)?; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Spawns `func` on the blocking executor. - /// - /// This method is suitable for handling long-running or intensive tasks. - pub async fn in_blocking_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError> - where - V: Send + Sync + 'static, - F: Fn(Request<Vec<u8>>, T) -> Result<V, ApiError> + Send + Sync + 'static, - { - let ctx = self.ctx; - let body = Self::get_body(self.body, self.allow_body).await?; - let (req_parts, _) = self.req.into_parts(); - let req = Request::from_parts(req_parts, body); - - let value = self - .executor - .clone() - .handle - .spawn_blocking(move || func(req, ctx)) - .await - .map_err(|e| { - ApiError::ServerError(format!( - "Failed to get blocking join handle: {}", - e.to_string() - )) - })??; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Call `func`, then return a response that is suitable for an SSE stream. 
- pub async fn sse_stream<F>(self, func: F) -> ApiResult - where - F: Fn(Request<()>, T) -> Result<Body, ApiError>, - { - let body = func(self.req, self.ctx)?; - - Response::builder() - .status(200) - .header("Content-Type", "text/event-stream") - .header("Connection", "Keep-Alive") - .header("Cache-Control", "no-cache") - .header("Access-Control-Allow-Origin", "*") - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } - - /// Downloads the bytes for `body`. - async fn get_body(body: Body, allow_body: bool) -> Result<Vec<u8>, ApiError> { - let bytes = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - - if !allow_body && !bytes[..].is_empty() { - Err(ApiError::BadRequest( - "The request body must be empty".to_string(), - )) - } else { - Ok(bytes.into_iter().collect()) - } - } -} - -/// A request that has been "handled" and now a result (`value`) needs to be serialize and -/// returned. -pub struct HandledRequest<V> { - encoding: ApiEncodingFormat, - value: V, -} - -impl HandledRequest<String> { - /// Simple encode a string as utf-8. - pub fn text_encoding(self) -> ApiResult { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(self.value)) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} - -impl<V: Serialize + Encode> HandledRequest<V> { - /// Suitable for all items which implement `serde` and `ssz`. 
- pub fn all_encodings(self) -> ApiResult { - match self.encoding { - ApiEncodingFormat::SSZ => Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/ssz") - .body(Body::from(self.value.as_ssz_bytes())) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), - _ => self.serde_encodings(), - } - } -} - -impl<V: Serialize> HandledRequest<V> { - /// Suitable for items which only implement `serde`. - pub fn serde_encodings(self) -> ApiResult { - let (body, content_type) = match self.encoding { - ApiEncodingFormat::JSON => ( - Body::from(serde_json::to_string(&self.value).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?), - "application/json", - ), - ApiEncodingFormat::SSZ => { - return Err(ApiError::UnsupportedType( - "Response cannot be encoded as SSZ.".into(), - )); - } - ApiEncodingFormat::YAML => ( - Body::from(serde_yaml::to_string(&self.value).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as YAML: {:?}", - e - )) - })?), - "application/yaml", - ), - }; - - Response::builder() - .status(StatusCode::OK) - .header("content-type", content_type) - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} diff --git a/common/rest_types/src/lib.rs b/common/rest_types/src/lib.rs deleted file mode 100644 index 1bedd1cadbc..00000000000 --- a/common/rest_types/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! A collection of types used to pass data across the rest HTTP API. -//! -//! This is primarily used by the validator client and the beacon node rest API. 
- -mod api_error; -mod beacon; -mod consensus; -mod handler; -mod node; -mod validator; - -pub use api_error::{ApiError, ApiResult}; -pub use beacon::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; -pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; -pub use handler::{ApiEncodingFormat, Handler}; -pub use node::{Health, SyncingResponse, SyncingStatus}; -pub use validator::{ - ValidatorDutiesRequest, ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription, -}; diff --git a/common/rest_types/src/node.rs b/common/rest_types/src/node.rs deleted file mode 100644 index ca98645cc8c..00000000000 --- a/common/rest_types/src/node.rs +++ /dev/null @@ -1,103 +0,0 @@ -//! Collection of types for the /node HTTP -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::Slot; - -#[cfg(target_os = "linux")] -use {procinfo::pid, psutil::process::Process}; - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -/// The current syncing status of the node. -pub struct SyncingStatus { - /// The starting slot of sync. - /// - /// For a finalized sync, this is the start slot of the current finalized syncing - /// chain. - /// - /// For head sync this is the last finalized slot. - pub starting_slot: Slot, - /// The current slot. - pub current_slot: Slot, - /// The highest known slot. For the current syncing chain. - /// - /// For a finalized sync, the target finalized slot. - /// For head sync, this is the highest known slot of all head chains. - pub highest_slot: Slot, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -/// The response for the /node/syncing HTTP GET. -pub struct SyncingResponse { - /// Is the node syncing. - pub is_syncing: bool, - /// The current sync status. 
- pub sync_status: SyncingStatus, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -/// Reports on the health of the Lighthouse instance. -pub struct Health { - /// The pid of this process. - pub pid: u32, - /// The number of threads used by this pid. - pub pid_num_threads: i32, - /// The total resident memory used by this pid. - pub pid_mem_resident_set_size: u64, - /// The total virtual memory used by this pid. - pub pid_mem_virtual_memory_size: u64, - /// Total virtual memory on the system - pub sys_virt_mem_total: u64, - /// Total virtual memory available for new processes. - pub sys_virt_mem_available: u64, - /// Total virtual memory used on the system - pub sys_virt_mem_used: u64, - /// Total virtual memory not used on the system - pub sys_virt_mem_free: u64, - /// Percentage of virtual memory used on the system - pub sys_virt_mem_percent: f32, - /// System load average over 1 minute. - pub sys_loadavg_1: f64, - /// System load average over 5 minutes. - pub sys_loadavg_5: f64, - /// System load average over 15 minutes. 
- pub sys_loadavg_15: f64, -} - -impl Health { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result<Self, String> { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result<Self, String> { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; - - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - let loadavg = - psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - Ok(Self { - pid: process.pid(), - pid_num_threads: stat.num_threads, - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one, - sys_loadavg_5: loadavg.five, - sys_loadavg_15: loadavg.fifteen, - }) - } -} diff --git a/common/rest_types/src/validator.rs b/common/rest_types/src/validator.rs deleted file mode 100644 index 2b0f077298a..00000000000 --- a/common/rest_types/src/validator.rs +++ /dev/null @@ -1,103 +0,0 @@ -use bls::{PublicKey, PublicKeyBytes}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::{CommitteeIndex, Epoch, Slot}; - -/// A Validator duty with the validator public key represented a `PublicKeyBytes`. -pub type ValidatorDutyBytes = ValidatorDutyBase<PublicKeyBytes>; -/// A validator duty with the pubkey represented as a `PublicKey`. 
-pub type ValidatorDuty = ValidatorDutyBase<PublicKey>; - -// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -pub struct ValidatorDutyBase<T> { - /// The validator's BLS public key, uniquely identifying them. - pub validator_pubkey: T, - /// The validator's index in `state.validators` - pub validator_index: Option<u64>, - /// The slot at which the validator must attest. - pub attestation_slot: Option<Slot>, - /// The index of the committee within `slot` of which the validator is a member. - pub attestation_committee_index: Option<CommitteeIndex>, - /// The position of the validator in the committee. - pub attestation_committee_position: Option<usize>, - /// The committee count at `attestation_slot`. - pub committee_count_at_slot: Option<u64>, - /// The slots in which a validator must propose a block (can be empty). - /// - /// Should be set to `None` when duties are not yet known (before the current epoch). - pub block_proposal_slots: Option<Vec<Slot>>, - /// This provides the modulo: `max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)` - /// which allows the validator client to determine if this duty requires the validator to be - /// aggregate attestations. - pub aggregator_modulo: Option<u64>, -} - -impl<T> ValidatorDutyBase<T> { - /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`. 
- pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool - where - T: PartialEq, - { - self.validator_pubkey == other.validator_pubkey - && self.validator_index == other.validator_index - && self.attestation_slot == other.attestation_slot - && self.attestation_committee_index == other.attestation_committee_index - && self.attestation_committee_position == other.attestation_committee_position - && self.committee_count_at_slot == other.committee_count_at_slot - && self.aggregator_modulo == other.aggregator_modulo - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorDutiesRequest { - pub epoch: Epoch, - pub pubkeys: Vec<PublicKeyBytes>, -} - -/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation -/// duties. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorSubscription { - /// The validators index. - pub validator_index: u64, - /// The index of the committee within `slot` of which the validator is a member. Used by the - /// beacon node to quickly evaluate the associated `SubnetId`. - pub attestation_committee_index: CommitteeIndex, - /// The slot in which to subscribe. - pub slot: Slot, - /// Committee count at slot to subscribe. - pub committee_count_at_slot: u64, - /// If true, the validator is an aggregator and the beacon node should aggregate attestations - /// for this slot. 
- pub is_aggregator: bool, -} - -#[cfg(test)] -mod test { - use super::*; - use bls::SecretKey; - - #[test] - fn eq_ignoring_proposal_slots() { - let validator_pubkey = SecretKey::deserialize(&[1; 32]).unwrap().public_key(); - - let duty1 = ValidatorDuty { - validator_pubkey, - validator_index: Some(10), - attestation_slot: Some(Slot::new(50)), - attestation_committee_index: Some(2), - attestation_committee_position: Some(6), - committee_count_at_slot: Some(4), - block_proposal_slots: None, - aggregator_modulo: Some(99), - }; - let duty2 = ValidatorDuty { - block_proposal_slots: Some(vec![Slot::new(42), Slot::new(45)]), - ..duty1.clone() - }; - assert_ne!(duty1, duty2); - assert!(duty1.eq_ignoring_proposal_slots(&duty2)); - assert!(duty2.eq_ignoring_proposal_slots(&duty1)); - } -} diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 41c847498a6..0fe1bedfeda 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -24,6 +24,16 @@ pub trait SlotClock: Send + Sync + Sized { /// Returns the slot at this present time. fn now(&self) -> Option<Slot>; + /// Returns the slot at this present time if genesis has happened. Otherwise, returns the + /// genesis slot. Returns `None` if there is an error reading the clock. + fn now_or_genesis(&self) -> Option<Slot> { + if self.is_prior_to_genesis()? { + Some(self.genesis_slot()) + } else { + self.now() + } + } + /// Indicates if the current time is prior to genesis time. /// /// Returns `None` if the system clock cannot be read. 
diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml new file mode 100644 index 00000000000..98ddab5d83d --- /dev/null +++ b/common/warp_utils/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "warp_utils" +version = "0.1.0" +authors = ["Paul Hauner <paul@paulhauner.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +warp = "0.2.5" +eth2 = { path = "../eth2" } +types = { path = "../../consensus/types" } +beacon_chain = { path = "../../beacon_node/beacon_chain" } +state_processing = { path = "../../consensus/state_processing" } +safe_arith = { path = "../../consensus/safe_arith" } diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs new file mode 100644 index 00000000000..ec9cf3c3442 --- /dev/null +++ b/common/warp_utils/src/lib.rs @@ -0,0 +1,5 @@ +//! This crate contains functions that are common across multiple `warp` HTTP servers in the +//! Lighthouse project. E.g., the `http_api` and `http_metrics` crates. 
+ +pub mod reject; +pub mod reply; diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs new file mode 100644 index 00000000000..1243d5f685a --- /dev/null +++ b/common/warp_utils/src/reject.rs @@ -0,0 +1,168 @@ +use eth2::types::ErrorMessage; +use std::convert::Infallible; +use warp::{http::StatusCode, reject::Reject}; + +#[derive(Debug)] +pub struct BeaconChainError(pub beacon_chain::BeaconChainError); + +impl Reject for BeaconChainError {} + +pub fn beacon_chain_error(e: beacon_chain::BeaconChainError) -> warp::reject::Rejection { + warp::reject::custom(BeaconChainError(e)) +} + +#[derive(Debug)] +pub struct BeaconStateError(pub types::BeaconStateError); + +impl Reject for BeaconStateError {} + +pub fn beacon_state_error(e: types::BeaconStateError) -> warp::reject::Rejection { + warp::reject::custom(BeaconStateError(e)) +} + +#[derive(Debug)] +pub struct ArithError(pub safe_arith::ArithError); + +impl Reject for ArithError {} + +pub fn arith_error(e: safe_arith::ArithError) -> warp::reject::Rejection { + warp::reject::custom(ArithError(e)) +} + +#[derive(Debug)] +pub struct SlotProcessingError(pub state_processing::SlotProcessingError); + +impl Reject for SlotProcessingError {} + +pub fn slot_processing_error(e: state_processing::SlotProcessingError) -> warp::reject::Rejection { + warp::reject::custom(SlotProcessingError(e)) +} + +#[derive(Debug)] +pub struct BlockProductionError(pub beacon_chain::BlockProductionError); + +impl Reject for BlockProductionError {} + +pub fn block_production_error(e: beacon_chain::BlockProductionError) -> warp::reject::Rejection { + warp::reject::custom(BlockProductionError(e)) +} + +#[derive(Debug)] +pub struct CustomNotFound(pub String); + +impl Reject for CustomNotFound {} + +pub fn custom_not_found(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomNotFound(msg)) +} + +#[derive(Debug)] +pub struct CustomBadRequest(pub String); + +impl Reject for CustomBadRequest {} + +pub fn 
custom_bad_request(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomBadRequest(msg)) +} + +#[derive(Debug)] +pub struct CustomServerError(pub String); + +impl Reject for CustomServerError {} + +pub fn custom_server_error(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomServerError(msg)) +} + +#[derive(Debug)] +pub struct BroadcastWithoutImport(pub String); + +impl Reject for BroadcastWithoutImport {} + +pub fn broadcast_without_import(msg: String) -> warp::reject::Rejection { + warp::reject::custom(BroadcastWithoutImport(msg)) +} + +#[derive(Debug)] +pub struct ObjectInvalid(pub String); + +impl Reject for ObjectInvalid {} + +pub fn object_invalid(msg: String) -> warp::reject::Rejection { + warp::reject::custom(ObjectInvalid(msg)) +} + +#[derive(Debug)] +pub struct NotSynced(pub String); + +impl Reject for NotSynced {} + +pub fn not_synced(msg: String) -> warp::reject::Rejection { + warp::reject::custom(NotSynced(msg)) +} + +/// This function receives a `Rejection` and tries to return a custom +/// value, otherwise simply passes the rejection along. 
+pub async fn handle_rejection(err: warp::Rejection) -> Result<impl warp::Reply, Infallible> { + let code; + let message; + + if err.is_not_found() { + code = StatusCode::NOT_FOUND; + message = "NOT_FOUND".to_string(); + } else if let Some(e) = err.find::<warp::filters::body::BodyDeserializeError>() { + message = format!("BAD_REQUEST: body deserialize error: {}", e); + code = StatusCode::BAD_REQUEST; + } else if let Some(e) = err.find::<warp::reject::InvalidQuery>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: invalid query: {}", e); + } else if let Some(e) = err.find::<crate::reject::BeaconChainError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::BeaconStateError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::SlotProcessingError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::BlockProductionError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::CustomNotFound>() { + code = StatusCode::NOT_FOUND; + message = format!("NOT_FOUND: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::CustomBadRequest>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::CustomServerError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("INTERNAL_SERVER_ERROR: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::BroadcastWithoutImport>() { + code = StatusCode::ACCEPTED; + message = format!( + "ACCEPTED: the object was broadcast to the network without being \ + fully imported to the local database: {}", + e.0 + ); + } else if let 
Some(e) = err.find::<crate::reject::ObjectInvalid>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: Invalid object: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::NotSynced>() { + code = StatusCode::SERVICE_UNAVAILABLE; + message = format!("SERVICE_UNAVAILABLE: beacon node is syncing: {}", e.0); + } else if err.find::<warp::reject::MethodNotAllowed>().is_some() { + code = StatusCode::METHOD_NOT_ALLOWED; + message = "METHOD_NOT_ALLOWED".to_string(); + } else { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = "UNHANDLED_REJECTION".to_string(); + } + + let json = warp::reply::json(&ErrorMessage { + code: code.as_u16(), + message, + stacktraces: vec![], + }); + + Ok(warp::reply::with_status(json, code)) +} diff --git a/common/warp_utils/src/reply.rs b/common/warp_utils/src/reply.rs new file mode 100644 index 00000000000..dcec6214f0c --- /dev/null +++ b/common/warp_utils/src/reply.rs @@ -0,0 +1,15 @@ +/// Add CORS headers to `reply` only if `allow_origin.is_some()`. +pub fn maybe_cors<T: warp::Reply + 'static>( + reply: T, + allow_origin: Option<&String>, +) -> Box<dyn warp::Reply> { + if let Some(allow_origin) = allow_origin { + Box::new(warp::reply::with_header( + reply, + "Access-Control-Allow-Origin", + allow_origin, + )) + } else { + Box::new(reply) + } +} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 99f998e5584..f6c43ae429a 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -4,7 +4,7 @@ use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; use types::{ BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, - IndexedAttestation, Slot, + IndexedAttestation, RelativeEpoch, ShufflingId, Slot, }; use crate::ForkChoiceStore; @@ -240,10 +240,18 @@ where /// Instantiates `Self` from the genesis parameters. 
pub fn from_genesis( fc_store: T, + genesis_block_root: Hash256, genesis_block: &BeaconBlock<E>, + genesis_state: &BeaconState<E>, ) -> Result<Self, Error<T::Error>> { let finalized_block_slot = genesis_block.slot; let finalized_block_state_root = genesis_block.state_root; + let current_epoch_shuffling_id = + ShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Current) + .map_err(Error::BeaconStateError)?; + let next_epoch_shuffling_id = + ShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Next) + .map_err(Error::BeaconStateError)?; let proto_array = ProtoArrayForkChoice::new( finalized_block_slot, @@ -251,6 +259,8 @@ where fc_store.justified_checkpoint().epoch, fc_store.finalized_checkpoint().epoch, fc_store.finalized_checkpoint().root, + current_epoch_shuffling_id, + next_epoch_shuffling_id, )?; Ok(Self { @@ -534,6 +544,10 @@ where root: block_root, parent_root: Some(block.parent_root), target_root, + current_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Current) + .map_err(Error::BeaconStateError)?, + next_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Next) + .map_err(Error::BeaconStateError)?, state_root: block.state_root, justified_epoch: state.current_justified_checkpoint.epoch, finalized_epoch: state.finalized_checkpoint.epoch, diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 78c7534cde9..7b508afd49c 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -6,3 +6,4 @@ pub use crate::fork_choice::{ SAFE_SLOTS_TO_UPDATE_JUSTIFIED, }; pub use fork_choice_store::ForkChoiceStore; +pub use proto_array::Block as ProtoBlock; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ffa9cbe6bd4..86fbbd8ec9e 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -351,7 +351,7 @@ impl ForkChoiceTest { let mut verified_attestation = self 
.harness .chain - .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) .expect("precondition: should gossip verify attestation"); if let MutationDelay::Blocks(slots) = delay { diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 6e1bd970b00..9cac0bafb10 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -4,7 +4,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice}; use serde_derive::{Deserialize, Serialize}; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; pub use ffg_updates::*; pub use no_votes::*; @@ -55,12 +55,15 @@ pub struct ForkChoiceTestDefinition { impl ForkChoiceTestDefinition { pub fn run(self) { + let junk_shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fork_choice = ProtoArrayForkChoice::new( self.finalized_block_slot, Hash256::zero(), self.justified_epoch, self.finalized_epoch, self.finalized_root, + junk_shuffling_id.clone(), + junk_shuffling_id, ) .expect("should create fork choice struct"); @@ -125,6 +128,14 @@ impl ForkChoiceTestDefinition { parent_root: Some(parent_root), state_root: Hash256::zero(), target_root: Hash256::zero(), + current_epoch_shuffling_id: ShufflingId::from_components( + Epoch::new(0), + Hash256::zero(), + ), + next_epoch_shuffling_id: ShufflingId::from_components( + Epoch::new(0), + Hash256::zero(), + ), justified_epoch, finalized_epoch, }; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 18db8d340ce..c89a96628a7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -2,7 +2,7 @@ use crate::{error::Error, Block}; use serde_derive::{Deserialize, Serialize}; use 
ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; #[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] pub struct ProtoNode { @@ -18,6 +18,8 @@ pub struct ProtoNode { /// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely fork choice attestation verification). pub target_root: Hash256, + pub current_epoch_shuffling_id: ShufflingId, + pub next_epoch_shuffling_id: ShufflingId, pub root: Hash256, pub parent: Option<usize>, pub justified_epoch: Epoch, @@ -142,6 +144,8 @@ impl ProtoArray { slot: block.slot, root: block.root, target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, state_root: block.state_root, parent: block .parent_root diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 451f3999313..e4cf5bbc67b 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -4,7 +4,7 @@ use crate::ssz_container::SszContainer; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -25,6 +25,8 @@ pub struct Block { pub parent_root: Option<Hash256>, pub state_root: Hash256, pub target_root: Hash256, + pub current_epoch_shuffling_id: ShufflingId, + pub next_epoch_shuffling_id: ShufflingId, pub justified_epoch: Epoch, pub finalized_epoch: Epoch, } @@ -70,6 +72,8 @@ impl ProtoArrayForkChoice { justified_epoch: Epoch, finalized_epoch: Epoch, finalized_root: Hash256, + current_epoch_shuffling_id: ShufflingId, + next_epoch_shuffling_id: ShufflingId, ) -> Result<Self, String> { let mut proto_array 
= ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -87,6 +91,8 @@ impl ProtoArrayForkChoice { // We are using the finalized_root as the target_root, since it always lies on an // epoch boundary. target_root: finalized_root, + current_epoch_shuffling_id, + next_epoch_shuffling_id, justified_epoch, finalized_epoch, }; @@ -194,6 +200,8 @@ impl ProtoArrayForkChoice { parent_root, state_root: block.state_root, target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), justified_epoch: block.justified_epoch, finalized_epoch: block.finalized_epoch, }) @@ -341,6 +349,7 @@ mod test_compute_deltas { let finalized_desc = Hash256::from_low_u64_be(2); let not_finalized_desc = Hash256::from_low_u64_be(3); let unknown = Hash256::from_low_u64_be(4); + let junk_shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fc = ProtoArrayForkChoice::new( genesis_slot, @@ -348,6 +357,8 @@ mod test_compute_deltas { genesis_epoch, genesis_epoch, finalized_root, + junk_shuffling_id.clone(), + junk_shuffling_id.clone(), ) .unwrap(); @@ -359,6 +370,8 @@ mod test_compute_deltas { parent_root: Some(finalized_root), state_root, target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), justified_epoch: genesis_epoch, finalized_epoch: genesis_epoch, }) @@ -372,6 +385,8 @@ mod test_compute_deltas { parent_root: None, state_root, target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), justified_epoch: genesis_epoch, finalized_epoch: genesis_epoch, }) diff --git a/consensus/serde_hex/Cargo.toml b/consensus/serde_hex/Cargo.toml deleted file mode 100644 index 2df5ff02a08..00000000000 --- a/consensus/serde_hex/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "serde_hex" -version = 
"0.2.0" -authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2018" - -[dependencies] -serde = "1.0.110" -hex = "0.4.2" diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 1fb35736baf..8c0013562c1 100644 --- a/consensus/serde_utils/Cargo.toml +++ b/consensus/serde_utils/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] serde = { version = "1.0.110", features = ["derive"] } serde_derive = "1.0.110" +hex = "0.4.2" [dev-dependencies] serde_json = "1.0.52" diff --git a/consensus/serde_utils/src/bytes_4_hex.rs b/consensus/serde_utils/src/bytes_4_hex.rs new file mode 100644 index 00000000000..e057d1a1288 --- /dev/null +++ b/consensus/serde_utils/src/bytes_4_hex.rs @@ -0,0 +1,38 @@ +//! Formats `[u8; 4]` as a 0x-prefixed hex string. +//! +//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. + +use crate::hex::PrefixedHexVisitor; +use serde::de::Error; +use serde::{Deserializer, Serializer}; + +const BYTES_LEN: usize = 4; + +pub fn serialize<S>(bytes: &[u8; BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; + + if decoded.len() != BYTES_LEN { + return Err(D::Error::custom(format!( + "expected {} bytes for array, got {}", + BYTES_LEN, + decoded.len() + ))); + } + + let mut array = [0; BYTES_LEN]; + array.copy_from_slice(&decoded); + Ok(array) +} diff --git a/consensus/serde_hex/src/lib.rs b/consensus/serde_utils/src/hex.rs similarity index 81% rename from consensus/serde_hex/src/lib.rs rename to consensus/serde_utils/src/hex.rs index db84222757d..79dfaa506b8 100644 --- a/consensus/serde_hex/src/lib.rs +++ b/consensus/serde_utils/src/hex.rs @@ -1,6 +1,9 @@ +//! 
Provides utilities for parsing 0x-prefixed hex strings. + use serde::de::{self, Visitor}; use std::fmt; +/// Encode `data` as a 0x-prefixed hex string. pub fn encode<T: AsRef<[u8]>>(data: T) -> String { let hex = hex::encode(data); let mut s = "0x".to_string(); @@ -8,6 +11,15 @@ pub fn encode<T: AsRef<[u8]>>(data: T) -> String { s } +/// Decode `data` from a 0x-prefixed hex string. +pub fn decode(s: &str) -> Result<Vec<u8>, String> { + if s.starts_with("0x") { + hex::decode(&s[2..]).map_err(|e| format!("invalid hex: {:?}", e)) + } else { + Err("hex must have 0x prefix".to_string()) + } +} + pub struct PrefixedHexVisitor; impl<'de> Visitor<'de> for PrefixedHexVisitor { diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index df2b44b6243..0016e67a3db 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -1,2 +1,9 @@ -pub mod quoted_u64; +mod quoted_int; + +pub mod bytes_4_hex; +pub mod hex; pub mod quoted_u64_vec; +pub mod u32_hex; +pub mod u8_hex; + +pub use quoted_int::{quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs new file mode 100644 index 00000000000..24edf1ebee2 --- /dev/null +++ b/consensus/serde_utils/src/quoted_int.rs @@ -0,0 +1,144 @@ +//! Formats some integer types using quotes. +//! +//! E.g., `1` serializes as `"1"`. +//! +//! Quotes can be optional during decoding. + +use serde::{Deserializer, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use std::convert::TryFrom; +use std::marker::PhantomData; + +macro_rules! define_mod { + ($int: ty, $visit_fn: ident) => { + /// Serde support for deserializing quoted integers. + /// + /// Configurable so that quotes are either required or optional. 
+ pub struct QuotedIntVisitor<T> { + require_quotes: bool, + _phantom: PhantomData<T>, + } + + impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor<T> + where + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + type Value = T; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + if self.require_quotes { + write!(formatter, "a quoted integer") + } else { + write!(formatter, "a quoted or unquoted integer") + } + } + + fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + s.parse::<$int>() + .map(T::from) + .map_err(serde::de::Error::custom) + } + + fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + if self.require_quotes { + Err(serde::de::Error::custom( + "received unquoted integer when quotes are required", + )) + } else { + T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) + } + } + } + + /// Wrapper type for requiring quotes on a `$int`-like type. + /// + /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested + /// inside types like `Option`, `Result` and `Vec`. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] + #[serde(transparent)] + pub struct Quoted<T> + where + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + #[serde(with = "require_quotes")] + pub value: T, + } + + /// Serialize with quotes. + pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + T: From<$int> + Into<$int> + Copy, + { + let v: $int = (*value).into(); + serializer.serialize_str(&format!("{}", v)) + } + + /// Deserialize with or without quotes. 
+ pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> + where + D: Deserializer<'de>, + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: false, + _phantom: PhantomData, + }) + } + + /// Requires quotes when deserializing. + /// + /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. + pub mod require_quotes { + pub use super::serialize; + use super::*; + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> + where + D: Deserializer<'de>, + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: true, + _phantom: PhantomData, + }) + } + } + + #[cfg(test)] + mod test { + use super::*; + + #[test] + fn require_quotes() { + let x = serde_json::from_str::<Quoted<$int>>("\"8\"").unwrap(); + assert_eq!(x.value, 8); + serde_json::from_str::<Quoted<$int>>("8").unwrap_err(); + } + } + }; +} + +pub mod quoted_u8 { + use super::*; + + define_mod!(u8, visit_u8); +} + +pub mod quoted_u32 { + use super::*; + + define_mod!(u32, visit_u32); +} + +pub mod quoted_u64 { + use super::*; + + define_mod!(u64, visit_u64); +} diff --git a/consensus/serde_utils/src/quoted_u64.rs b/consensus/serde_utils/src/quoted_u64.rs deleted file mode 100644 index 2e73a104f19..00000000000 --- a/consensus/serde_utils/src/quoted_u64.rs +++ /dev/null @@ -1,115 +0,0 @@ -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; - -/// Serde support for deserializing quoted integers. -/// -/// Configurable so that quotes are either required or optional. 
-pub struct QuotedIntVisitor<T> { - require_quotes: bool, - _phantom: PhantomData<T>, -} - -impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor<T> -where - T: From<u64> + Into<u64> + Copy, -{ - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> - where - E: serde::de::Error, - { - s.parse::<u64>() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - Ok(T::from(v)) - } - } -} - -/// Wrapper type for requiring quotes on a `u64`-like type. -/// -/// Unlike using `serde(with = "quoted_u64::require_quotes")` this is composable, and can be nested -/// inside types like `Option`, `Result` and `Vec`. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] -#[serde(transparent)] -pub struct Quoted<T> -where - T: From<u64> + Into<u64> + Copy, -{ - #[serde(with = "require_quotes")] - pub value: T, -} - -/// Serialize with quotes. -pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, - T: From<u64> + Into<u64> + Copy, -{ - let v: u64 = (*value).into(); - serializer.serialize_str(&format!("{}", v)) -} - -/// Deserialize with or without quotes. -pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> -where - D: Deserializer<'de>, - T: From<u64> + Into<u64> + Copy, -{ - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) -} - -/// Requires quotes when deserializing. -/// -/// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
-pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> - where - D: Deserializer<'de>, - T: From<u64> + Into<u64> + Copy, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::<Quoted<u64>>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::<Quoted<u64>>("8").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs index c5badee5012..f124c989092 100644 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ b/consensus/serde_utils/src/quoted_u64_vec.rs @@ -1,3 +1,9 @@ +//! Formats `Vec<u64>` using quotes. +//! +//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. +//! +//! Quotes can be optional during decoding. + use serde::ser::SerializeSeq; use serde::{Deserializer, Serializer}; use serde_derive::{Deserialize, Serialize}; @@ -6,7 +12,7 @@ use serde_derive::{Deserialize, Serialize}; #[serde(transparent)] pub struct QuotedIntWrapper { #[serde(with = "crate::quoted_u64")] - int: u64, + pub int: u64, } pub struct QuotedIntVecVisitor; diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs new file mode 100644 index 00000000000..c1ab3537b2a --- /dev/null +++ b/consensus/serde_utils/src/u32_hex.rs @@ -0,0 +1,21 @@ +//! Formats `u32` as a 0x-prefixed, little-endian hex string. +//! +//! E.g., `0` serializes as `"0x00000000"`. 
+ +use crate::bytes_4_hex; +use serde::{Deserializer, Serializer}; + +pub fn serialize<S>(num: &u32, serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let hex = format!("0x{}", hex::encode(num.to_le_bytes())); + serializer.serialize_str(&hex) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<u32, D::Error> +where + D: Deserializer<'de>, +{ + bytes_4_hex::deserialize(deserializer).map(u32::from_le_bytes) +} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs new file mode 100644 index 00000000000..8083e1d120b --- /dev/null +++ b/consensus/serde_utils/src/u8_hex.rs @@ -0,0 +1,29 @@ +//! Formats `u8` as a 0x-prefixed hex string. +//! +//! E.g., `0` serializes as `"0x00"`. + +use crate::hex::PrefixedHexVisitor; +use serde::de::Error; +use serde::{Deserializer, Serializer}; + +pub fn serialize<S>(byte: &u8, serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let hex = format!("0x{}", hex::encode([*byte])); + serializer.serialize_str(&hex) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<u8, D::Error> +where + D: Deserializer<'de>, +{ + let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; + if bytes.len() != 1 { + return Err(D::Error::custom(format!( + "expected 1 byte for u8, got {}", + bytes.len() + ))); + } + Ok(bytes[0]) +} diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 144b3ce31fd..ca6a5adbe81 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -11,7 +11,7 @@ name = "ssz_types" tree_hash = "0.1.0" serde = "1.0.110" serde_derive = "1.0.110" -serde_hex = { path = "../serde_hex" } +serde_utils = { path = "../serde_utils" } eth2_ssz = "0.1.2" typenum = "1.12.0" arbitrary = { version = "0.4.4", features = ["derive"], optional = true } diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 1b6dce3ec0f..09fa9fc2df4 100644 --- a/consensus/ssz_types/src/bitfield.rs 
+++ b/consensus/ssz_types/src/bitfield.rs @@ -3,7 +3,7 @@ use crate::Error; use core::marker::PhantomData; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use ssz::{Decode, Encode}; use tree_hash::Hash256; use typenum::Unsigned; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 80b4007b973..c3a5cd90d5d 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -39,6 +39,8 @@ tempfile = "3.1.0" derivative = "2.1.1" rusqlite = { version = "0.23.1", features = ["bundled"], optional = true } arbitrary = { version = "0.4.4", features = ["derive"], optional = true } +serde_utils = { path = "../serde_utils" } +regex = "1.3.9" [dev-dependencies] serde_json = "1.0.52" diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 737c891c9fb..52871226107 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct AggregateAndProof<T: EthSpec> { /// The index of the validator that created the attestation. + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. 
pub aggregate: Attestation<T>, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 67fb280025c..07fa529e0ff 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -26,6 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index c32e4683e18..613d7fd1c88 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,4 +12,7 @@ pub struct AttestationDuty { pub committee_position: usize, /// The total number of attesters in the committee. pub committee_len: usize, + /// The committee count at `attestation_slot`. + #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index eeb10458bfa..d3a9160709c 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct BeaconBlock<T: EthSpec> { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 489c5bc9d77..ef28307edcc 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str, Graffiti}; use crate::*; use serde_derive::{Deserialize, Serialize}; @@ -17,10 +16,6 @@ use tree_hash_derive::TreeHash; pub struct BeaconBlockBody<T: EthSpec> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, - #[serde( - serialize_with = 
"graffiti_to_hex_str", - deserialize_with = "graffiti_from_hex_str" - )] pub graffiti: Graffiti, pub proposer_slashings: VariableList<ProposerSlashing, T::MaxProposerSlashings>, pub attester_slashings: VariableList<AttesterSlashing<T>, T::MaxAttesterSlashings>, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 04a20e56d3f..708c0e16fe7 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -14,6 +14,7 @@ use tree_hash_derive::TreeHash; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockHeader { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a2d923da9d3..25cb85ce8cc 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -157,6 +157,7 @@ where T: EthSpec, { // Versioning + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, pub slot: Slot, @@ -173,6 +174,7 @@ where // Ethereum 1.0 chain data pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList<Eth1Data, T::SlotsPerEth1VotingPeriod>, + #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry @@ -913,6 +915,13 @@ impl<T: EthSpec> BeaconState<T> { self.exit_cache = ExitCache::default(); } + /// Returns `true` if the committee cache for `relative_epoch` is built and ready to use. + pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool { + let i = Self::committee_cache_index(relative_epoch); + + self.committee_caches[i].is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) + } + /// Build an epoch cache, unless it is has already been built. 
pub fn build_committee_cache( &mut self, diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 6ee24cd2bb2..728c9cf026d 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -186,6 +186,7 @@ impl CommitteeCache { index, committee_position, committee_len, + committees_at_slot: self.committees_per_slot(), }) }) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c621acb81b8..7327895eeef 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -4,10 +4,6 @@ use serde_derive::{Deserialize, Serialize}; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; -use utils::{ - fork_from_hex_str, fork_to_hex_str, u32_from_hex_str, u32_to_hex_str, u8_from_hex_str, - u8_to_hex_str, -}; /// Each of the BLS signature domains. /// @@ -65,12 +61,9 @@ pub struct ChainSpec { /* * Initial Values */ - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], - #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] + #[serde(with = "serde_utils::u8_hex")] pub bls_withdrawal_prefix_byte: u8, /* @@ -115,6 +108,7 @@ pub struct ChainSpec { */ pub eth1_follow_distance: u64, pub seconds_per_eth1_block: u64, + pub deposit_contract_address: Address, /* * Networking @@ -326,6 +320,9 @@ impl ChainSpec { */ eth1_follow_distance: 1_024, seconds_per_eth1_block: 14, + deposit_contract_address: "1234567890123456789012345678901234567890" + .parse() + .expect("chain spec deposit contract address"), /* * Network specific @@ -448,104 +445,127 @@ pub struct YamlConfig { #[serde(default)] config_name: String, // ChainSpec - max_committees_per_slot: usize, - target_committee_size: usize, + #[serde(with = "serde_utils::quoted_u64")] + 
max_committees_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] + target_committee_size: u64, + #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, + #[serde(with = "serde_utils::quoted_u8")] shuffle_round_count: u8, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_time: u64, + #[serde(with = "serde_utils::quoted_u64")] genesis_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] min_deposit_amount: u64, + #[serde(with = "serde_utils::quoted_u64")] max_effective_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] ejection_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] effective_balance_increment: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_downward_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_upward_multiplier: u64, // Proportional slashing multiplier defaults to 3 for compatibility with Altona and Medalla. 
#[serde(default = "default_proportional_slashing_multiplier")] + #[serde(with = "serde_utils::quoted_u64")] proportional_slashing_multiplier: u64, - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], - #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] + #[serde(with = "serde_utils::u8_hex")] bls_withdrawal_prefix: u8, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] min_attestation_inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] min_seed_lookahead: u64, + #[serde(with = "serde_utils::quoted_u64")] max_seed_lookahead: u64, + #[serde(with = "serde_utils::quoted_u64")] min_epochs_to_inactivity_penalty: u64, + #[serde(with = "serde_utils::quoted_u64")] min_validator_withdrawability_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] shard_committee_period: u64, + #[serde(with = "serde_utils::quoted_u64")] base_reward_factor: u64, + #[serde(with = "serde_utils::quoted_u64")] whistleblower_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] proposer_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] inactivity_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] min_slashing_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] safe_slots_to_update_justified: u64, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_beacon_proposer: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_beacon_attester: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_randao: u32, - #[serde( - deserialize_with = 
"u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_deposit: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_voluntary_exit: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_selection_proof: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_aggregate_and_proof: u32, // EthSpec + #[serde(with = "serde_utils::quoted_u32")] max_validators_per_committee: u32, + #[serde(with = "serde_utils::quoted_u64")] slots_per_epoch: u64, + #[serde(with = "serde_utils::quoted_u64")] epochs_per_eth1_voting_period: u64, - slots_per_historical_root: usize, - epochs_per_historical_vector: usize, - epochs_per_slashings_vector: usize, + #[serde(with = "serde_utils::quoted_u64")] + slots_per_historical_root: u64, + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_historical_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_slashings_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] historical_roots_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] validator_registry_limit: u64, + #[serde(with = "serde_utils::quoted_u32")] max_proposer_slashings: u32, + #[serde(with = "serde_utils::quoted_u32")] max_attester_slashings: u32, + #[serde(with = "serde_utils::quoted_u32")] max_attestations: u32, + #[serde(with = "serde_utils::quoted_u32")] max_deposits: u32, + #[serde(with = "serde_utils::quoted_u32")] max_voluntary_exits: u32, // Validator + #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, + #[serde(with = "serde_utils::quoted_u64")] target_aggregators_per_committee: u64, + #[serde(with = "serde_utils::quoted_u64")] random_subnets_per_validator: u64, + #[serde(with = 
"serde_utils::quoted_u64")] epochs_per_random_subnet_subscription: u64, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_eth1_block: u64, + deposit_contract_address: Address, /* TODO: incorporate these into ChainSpec and turn on `serde(deny_unknown_fields)` deposit_chain_id: u64, deposit_network_id: u64, - deposit_contract_address: String, */ } @@ -568,8 +588,8 @@ impl YamlConfig { Self { config_name: T::spec_name().to_string(), // ChainSpec - max_committees_per_slot: spec.max_committees_per_slot, - target_committee_size: spec.target_committee_size, + max_committees_per_slot: spec.max_committees_per_slot as u64, + target_committee_size: spec.target_committee_size as u64, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, churn_limit_quotient: spec.churn_limit_quotient, shuffle_round_count: spec.shuffle_round_count, @@ -611,9 +631,9 @@ impl YamlConfig { max_validators_per_committee: T::MaxValidatorsPerCommittee::to_u32(), slots_per_epoch: T::slots_per_epoch(), epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(), - slots_per_historical_root: T::slots_per_historical_root(), - epochs_per_historical_vector: T::epochs_per_historical_vector(), - epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_usize(), + slots_per_historical_root: T::slots_per_historical_root() as u64, + epochs_per_historical_vector: T::epochs_per_historical_vector() as u64, + epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_u64(), historical_roots_limit: T::HistoricalRootsLimit::to_u64(), validator_registry_limit: T::ValidatorRegistryLimit::to_u64(), max_proposer_slashings: T::MaxProposerSlashings::to_u32(), @@ -628,6 +648,7 @@ impl YamlConfig { random_subnets_per_validator: spec.random_subnets_per_validator, epochs_per_random_subnet_subscription: spec.epochs_per_random_subnet_subscription, seconds_per_eth1_block: spec.seconds_per_eth1_block, + deposit_contract_address: spec.deposit_contract_address, } } @@ -643,9 +664,9 @@ impl YamlConfig { if 
self.max_validators_per_committee != T::MaxValidatorsPerCommittee::to_u32() || self.slots_per_epoch != T::slots_per_epoch() || self.epochs_per_eth1_voting_period != T::EpochsPerEth1VotingPeriod::to_u64() - || self.slots_per_historical_root != T::slots_per_historical_root() - || self.epochs_per_historical_vector != T::epochs_per_historical_vector() - || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_usize() + || self.slots_per_historical_root != T::slots_per_historical_root() as u64 + || self.epochs_per_historical_vector != T::epochs_per_historical_vector() as u64 + || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_u64() || self.historical_roots_limit != T::HistoricalRootsLimit::to_u64() || self.validator_registry_limit != T::ValidatorRegistryLimit::to_u64() || self.max_proposer_slashings != T::MaxProposerSlashings::to_u32() @@ -662,8 +683,8 @@ impl YamlConfig { /* * Misc */ - max_committees_per_slot: self.max_committees_per_slot, - target_committee_size: self.target_committee_size, + max_committees_per_slot: self.max_committees_per_slot as usize, + target_committee_size: self.target_committee_size as usize, min_per_epoch_churn_limit: self.min_per_epoch_churn_limit, churn_limit_quotient: self.churn_limit_quotient, shuffle_round_count: self.shuffle_round_count, @@ -685,6 +706,7 @@ impl YamlConfig { random_subnets_per_validator: self.random_subnets_per_validator, epochs_per_random_subnet_subscription: self.epochs_per_random_subnet_subscription, seconds_per_eth1_block: self.seconds_per_eth1_block, + deposit_contract_address: self.deposit_contract_address, /* * Gwei values */ diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index ce72c362e21..8e2050a0b83 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, + 
#[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub signature: SignatureBytes, } diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index fe283a17f93..92f6b66bf7f 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index e10744368f6..008b7933fff 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::Epoch; use serde_derive::{Deserialize, Serialize}; @@ -16,15 +15,9 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct EnrForkId { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index dcc1ea09819..e3b74cc491c 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -26,6 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct Eth1Data { pub deposit_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub block_hash: Hash256, } diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 8e95710c4ad..b129271ba0f 100644 --- a/consensus/types/src/fork.rs +++ 
b/consensus/types/src/fork.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::Epoch; use serde_derive::{Deserialize, Serialize}; @@ -25,15 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index bad6f6219e2..092102f779e 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::{Hash256, SignedRoot}; use serde_derive::{Deserialize, Serialize}; @@ -15,10 +14,7 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct ForkData { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub genesis_validators_root: Hash256, } diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs index 6215fb0cd7d..79bc149e43e 100644 --- a/consensus/types/src/free_attestation.rs +++ b/consensus/types/src/free_attestation.rs @@ -9,5 +9,6 @@ use serde_derive::Serialize; pub struct FreeAttestation { pub data: AttestationData, pub signature: Signature, + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs new file mode 100644 index 00000000000..f35df93838b --- /dev/null +++ 
b/consensus/types/src/graffiti.rs @@ -0,0 +1,132 @@ +use crate::{ + test_utils::{RngCore, TestRandom}, + Hash256, +}; +use regex::bytes::Regex; +use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt; +use tree_hash::TreeHash; + +pub const GRAFFITI_BYTES_LEN: usize = 32; + +/// The 32-byte `graffiti` field on a beacon block. +#[derive(Default, Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); + +impl Graffiti { + pub fn as_utf8_lossy(&self) -> String { + #[allow(clippy::invalid_regex)] + let re = Regex::new("\\p{C}").expect("graffiti regex is valid"); + String::from_utf8_lossy(&re.replace_all(&self.0[..], &b""[..])).to_string() + } +} + +impl fmt::Display for Graffiti { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(&self.0)) + } +} + +impl From<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { + fn from(bytes: [u8; GRAFFITI_BYTES_LEN]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { + fn into(self) -> [u8; GRAFFITI_BYTES_LEN] { + self.0 + } +} + +pub mod serde_graffiti { + use super::*; + + pub fn serialize<S>(bytes: &[u8; GRAFFITI_BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&serde_utils::hex::encode(bytes)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error> + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + + let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + + if bytes.len() != GRAFFITI_BYTES_LEN { + return Err(D::Error::custom(format!( + "incorrect byte length {}, expected {}", + bytes.len(), + GRAFFITI_BYTES_LEN + ))); + } + + let mut array = [0; 
GRAFFITI_BYTES_LEN]; + array[..].copy_from_slice(&bytes); + + Ok(array) + } +} + +impl Encode for Graffiti { + fn is_ssz_fixed_len() -> bool { + <[u8; GRAFFITI_BYTES_LEN] as Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; GRAFFITI_BYTES_LEN] as Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + self.0.ssz_append(buf) + } +} + +impl Decode for Graffiti { + fn is_ssz_fixed_len() -> bool { + <[u8; GRAFFITI_BYTES_LEN] as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; GRAFFITI_BYTES_LEN] as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + <[u8; GRAFFITI_BYTES_LEN]>::from_ssz_bytes(bytes).map(Self) + } +} + +impl TreeHash for Graffiti { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; GRAFFITI_BYTES_LEN]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec<u8> { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; GRAFFITI_BYTES_LEN]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for Graffiti { + fn random_for_test(rng: &mut impl RngCore) -> Self { + Self::from(Hash256::random_for_test(rng).to_fixed_bytes()) + } +} diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 341db180750..eaae75de839 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -18,6 +18,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct IndexedAttestation<T: EthSpec> { /// Lists validator registry indices, not committee indices. 
+ #[serde(with = "quoted_variable_list_u64")] pub attesting_indices: VariableList<u64, T::MaxValidatorsPerCommittee>, pub data: AttestationData, pub signature: AggregateSignature, @@ -53,6 +54,43 @@ impl<T: EthSpec> Hash for IndexedAttestation<T> { } } +/// Serialize a variable list of `u64` such that each int is quoted. Deserialize a variable +/// list supporting both quoted and un-quoted ints. +/// +/// E.g.,`["0", "1", "2"]` +mod quoted_variable_list_u64 { + use super::*; + use crate::Unsigned; + use serde::ser::SerializeSeq; + use serde::{Deserializer, Serializer}; + use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; + + pub fn serialize<S, T>(value: &VariableList<u64, T>, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + T: Unsigned, + { + let mut seq = serializer.serialize_seq(Some(value.len()))?; + for &int in value.iter() { + seq.serialize_element(&QuotedIntWrapper { int })?; + } + seq.end() + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<VariableList<u64, T>, D::Error> + where + D: Deserializer<'de>, + T: Unsigned, + { + deserializer + .deserialize_any(QuotedIntVecVisitor) + .and_then(|vec| { + VariableList::new(vec) + .map_err(|e| serde::de::Error::custom(format!("invalid length: {:?}", e))) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 19697118a50..65c1290d7a7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -29,19 +29,21 @@ pub mod eth_spec; pub mod fork; pub mod fork_data; pub mod free_attestation; +pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; pub mod pending_attestation; pub mod proposer_slashing; pub mod relative_epoch; pub mod selection_proof; +pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod signed_voluntary_exit; pub mod signing_data; -pub mod utils; pub mod validator; +pub 
mod validator_subscription; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; @@ -74,12 +76,14 @@ pub use crate::eth1_data::Eth1Data; pub use crate::fork::Fork; pub use crate::fork_data::ForkData; pub use crate::free_attestation::FreeAttestation; +pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; +pub use crate::shuffling_id::ShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; @@ -88,6 +92,7 @@ pub use crate::signing_data::{SignedRoot, SigningData}; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::subnet_id::SubnetId; pub use crate::validator::Validator; +pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; pub type CommitteeIndex = u64; @@ -99,4 +104,3 @@ pub use bls::{ AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; -pub use utils::{Graffiti, GRAFFITI_BYTES_LEN}; diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 70ebb1bbd68..f4b0fd9b148 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -13,7 +13,9 @@ use tree_hash_derive::TreeHash; pub struct PendingAttestation<T: EthSpec> { pub aggregation_bits: BitList<T::MaxValidatorsPerCommittee>, pub data: AttestationData, + #[serde(with = "serde_utils::quoted_u64")] pub 
inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, } diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs new file mode 100644 index 00000000000..d54b5fa640f --- /dev/null +++ b/consensus/types/src/shuffling_id.rs @@ -0,0 +1,61 @@ +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::hash::Hash; + +/// Can be used to key (ID) the shuffling in some chain, in some epoch. +/// +/// ## Reasoning +/// +/// We say that the ID of some shuffling is always equal to a 2-tuple: +/// +/// - The epoch for which the shuffling should be effective. +/// - A block root, where this is the root at the *last* slot of the penultimate epoch. I.e., the +/// final block which contributed a randao reveal to the seed for the shuffling. +/// +/// The struct stores exactly that 2-tuple. +#[derive(Debug, PartialEq, Eq, Clone, Hash, Serialize, Deserialize, Encode, Decode)] +pub struct ShufflingId { + pub shuffling_epoch: Epoch, + shuffling_decision_block: Hash256, +} + +impl ShufflingId { + /// Using the given `state`, return the shuffling id for the shuffling at the given + /// `relative_epoch`. + /// + /// The `block_root` provided should be either: + /// + /// - The root of the block which produced this state. + /// - If the state is from a skip slot, the root of the latest block in that state. + pub fn new<E: EthSpec>( + block_root: Hash256, + state: &BeaconState<E>, + relative_epoch: RelativeEpoch, + ) -> Result<Self, BeaconStateError> { + let shuffling_epoch = relative_epoch.into_epoch(state.current_epoch()); + + let shuffling_decision_slot = shuffling_epoch + .saturating_sub(1_u64) + .start_slot(E::slots_per_epoch()) + .saturating_sub(1_u64); + + let shuffling_decision_block = if state.slot == shuffling_decision_slot { + block_root + } else { + *state.get_block_root(shuffling_decision_slot)? 
+ }; + + Ok(Self { + shuffling_epoch, + shuffling_decision_block, + }) + } + + pub fn from_components(shuffling_epoch: Epoch, shuffling_decision_block: Hash256) -> Self { + Self { + shuffling_epoch, + shuffling_decision_block, + } + } +} diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/slot_epoch_macros.rs index 26b80692c94..caf31417d66 100644 --- a/consensus/types/src/slot_epoch_macros.rs +++ b/consensus/types/src/slot_epoch_macros.rs @@ -313,6 +313,18 @@ macro_rules! impl_ssz { }; } +macro_rules! impl_from_str { + ($type: ident) => { + impl std::str::FromStr for $type { + type Err = std::num::ParseIntError; + + fn from_str(s: &str) -> Result<$type, Self::Err> { + u64::from_str(s).map($type) + } + } + }; +} + macro_rules! impl_common { ($type: ident) => { impl_from_into_u64!($type); @@ -328,6 +340,7 @@ macro_rules! impl_common { impl_display!($type); impl_debug!($type); impl_ssz!($type); + impl_from_str!($type); }; } diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 80cc249776f..667e2c9b78e 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -6,7 +6,8 @@ use std::ops::{Deref, DerefMut}; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct SubnetId(u64); +#[serde(transparent)] +pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); impl SubnetId { pub fn new(id: u64) -> Self { diff --git a/consensus/types/src/utils.rs b/consensus/types/src/utils.rs deleted file mode 100644 index a527fc18fd1..00000000000 --- a/consensus/types/src/utils.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod serde_utils; - -pub use self::serde_utils::*; diff --git a/consensus/types/src/utils/serde_utils.rs b/consensus/types/src/utils/serde_utils.rs deleted file mode 100644 index 36b719646bf..00000000000 --- a/consensus/types/src/utils/serde_utils.rs +++ /dev/null @@ -1,134 +0,0 @@ -use 
serde::de::Error; -use serde::{Deserialize, Deserializer, Serializer}; - -pub const FORK_BYTES_LEN: usize = 4; -pub const GRAFFITI_BYTES_LEN: usize = 32; - -/// Type for a slice of `GRAFFITI_BYTES_LEN` bytes. -/// -/// Gets included inside each `BeaconBlockBody`. -pub type Graffiti = [u8; GRAFFITI_BYTES_LEN]; - -pub fn u8_from_hex_str<'de, D>(deserializer: D) -> Result<u8, D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - - let start = match s.as_str().get(2..) { - Some(start) => start, - None => return Err(D::Error::custom("string length too small")), - }; - u8::from_str_radix(&start, 16).map_err(D::Error::custom) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. -pub fn u8_to_hex_str<S>(byte: &u8, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex: String = "0x".to_string(); - hex.push_str(&hex::encode(&[*byte])); - - serializer.serialize_str(&hex) -} - -pub fn u32_from_hex_str<'de, D>(deserializer: D) -> Result<u32, D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let start = s - .as_str() - .get(2..) - .ok_or_else(|| D::Error::custom("string length too small"))?; - - u32::from_str_radix(&start, 16) - .map_err(D::Error::custom) - .map(u32::from_be) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `num` to be a ref. -pub fn u32_to_hex_str<S>(num: &u32, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex: String = "0x".to_string(); - let bytes = num.to_le_bytes(); - hex.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex) -} - -pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let mut array = [0 as u8; FORK_BYTES_LEN]; - - let start = s - .as_str() - .get(2..) 
- .ok_or_else(|| D::Error::custom("string length too small"))?; - let decoded: Vec<u8> = hex::decode(&start).map_err(D::Error::custom)?; - - if decoded.len() != FORK_BYTES_LEN { - return Err(D::Error::custom("Fork length too long")); - } - - for (i, item) in array.iter_mut().enumerate() { - if i > decoded.len() { - break; - } - *item = decoded[i]; - } - Ok(array) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] -pub fn fork_to_hex_str<S>(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn graffiti_to_hex_str<S>(bytes: &Graffiti, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn graffiti_from_hex_str<'de, D>(deserializer: D) -> Result<Graffiti, D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let mut array = Graffiti::default(); - - let start = s - .as_str() - .get(2..) 
- .ok_or_else(|| D::Error::custom("string length too small"))?; - let decoded: Vec<u8> = hex::decode(&start).map_err(D::Error::custom)?; - - if decoded.len() > GRAFFITI_BYTES_LEN { - return Err(D::Error::custom("Fork length too long")); - } - - for (i, item) in array.iter_mut().enumerate() { - if i > decoded.len() { - break; - } - *item = decoded[i]; - } - Ok(array) -} diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator_subscription.rs new file mode 100644 index 00000000000..fd48660c52b --- /dev/null +++ b/consensus/types/src/validator_subscription.rs @@ -0,0 +1,21 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; + +/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation +/// duties. +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct ValidatorSubscription { + /// The validators index. + pub validator_index: u64, + /// The index of the committee within `slot` of which the validator is a member. Used by the + /// beacon node to quickly evaluate the associated `SubnetId`. + pub attestation_committee_index: CommitteeIndex, + /// The slot in which to subscribe. + pub slot: Slot, + /// Committee count at slot to subscribe. + pub committee_count_at_slot: u64, + /// If true, the validator is an aggregator and the beacon node should aggregate attestations + /// for this slot. + pub is_aggregator: bool, +} diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index a9509d7affa..c33ea7e79f7 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index e1cb1fde319..8fd004a80b2 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -11,7 +11,7 @@ milagro_bls = { git = "https://github.com/sigp/milagro_bls", branch = "paulh" } rand = "0.7.2" serde = "1.0.102" serde_derive = "1.0.102" -serde_hex = { path = "../../consensus/serde_hex" } +serde_utils = { path = "../../consensus/serde_utils" } hex = "0.3" eth2_hashing = "0.1.0" ethereum-types = "0.9.1" diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 240b7d1880d..0517512f82a 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -6,7 +6,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::marker::PhantomData; @@ -245,6 +245,23 @@ where impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl<Pub, AggPub, Sig, AggSig> fmt::Display for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> +where + Sig: TSignature<Pub>, + AggSig: TAggregateSignature<Pub, AggPub, Sig>, +{ + impl_display!(); +} + +impl<Pub, AggPub, Sig, AggSig> std::str::FromStr + for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> +where + Sig: TSignature<Pub>, + AggSig: TAggregateSignature<Pub, AggPub, Sig>, +{ + impl_from_str!(); +} + impl<Pub, AggPub, Sig, AggSig> Serialize for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> where Sig: TSignature<Pub>, diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 29814d24aca..7b22d272990 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,7 +1,7 @@ use crate::Error; use 
serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; @@ -97,6 +97,14 @@ impl<Pub: TPublicKey> TreeHash for GenericPublicKey<Pub> { impl_tree_hash!(PUBLIC_KEY_BYTES_LEN); } +impl<Pub: TPublicKey> fmt::Display for GenericPublicKey<Pub> { + impl_display!(); +} + +impl<Pub: TPublicKey> std::str::FromStr for GenericPublicKey<Pub> { + impl_from_str!(); +} + impl<Pub: TPublicKey> Serialize for GenericPublicKey<Pub> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index beceac1c904..387eb91c969 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -4,7 +4,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; @@ -101,6 +101,16 @@ where Pub: TPublicKey, { fn from(pk: GenericPublicKey<Pub>) -> Self { + Self::from(&pk) + } +} + +/// Serializes the `PublicKey` in compressed form, storing the bytes in the newly created `Self`. 
+impl<Pub> From<&GenericPublicKey<Pub>> for GenericPublicKeyBytes<Pub> +where + Pub: TPublicKey, +{ + fn from(pk: &GenericPublicKey<Pub>) -> Self { Self { bytes: pk.serialize(), _phantom: PhantomData, @@ -132,6 +142,14 @@ impl<Pub> TreeHash for GenericPublicKeyBytes<Pub> { impl_tree_hash!(PUBLIC_KEY_BYTES_LEN); } +impl<Pub> fmt::Display for GenericPublicKeyBytes<Pub> { + impl_display!(); +} + +impl<Pub> std::str::FromStr for GenericPublicKeyBytes<Pub> { + impl_from_str!(); +} + impl<Pub> Serialize for GenericPublicKeyBytes<Pub> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 28a9361957a..44250d4a6ba 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -4,7 +4,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::marker::PhantomData; @@ -149,6 +149,14 @@ impl<PublicKey, T: TSignature<PublicKey>> TreeHash for GenericSignature<PublicKe impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl<PublicKey, T: TSignature<PublicKey>> fmt::Display for GenericSignature<PublicKey, T> { + impl_display!(); +} + +impl<PublicKey, T: TSignature<PublicKey>> std::str::FromStr for GenericSignature<PublicKey, T> { + impl_from_str!(); +} + impl<PublicKey, T: TSignature<PublicKey>> Serialize for GenericSignature<PublicKey, T> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs index 1f987ecd362..bc7e7f111e8 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -5,7 +5,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use 
serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; @@ -124,6 +124,14 @@ impl<Pub, Sig> TreeHash for GenericSignatureBytes<Pub, Sig> { impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl<Pub, Sig> fmt::Display for GenericSignatureBytes<Pub, Sig> { + impl_display!(); +} + +impl<Pub, Sig> std::str::FromStr for GenericSignatureBytes<Pub, Sig> { + impl_from_str!(); +} + impl<Pub, Sig> Serialize for GenericSignatureBytes<Pub, Sig> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/macros.rs b/crypto/bls/src/macros.rs index ca103da6da4..136faeb4423 100644 --- a/crypto/bls/src/macros.rs +++ b/crypto/bls/src/macros.rs @@ -76,6 +76,35 @@ macro_rules! impl_ssz_decode { }; } +/// Contains the functions required for a `fmt::Display` implementation. +/// +/// Does not include the `Impl` section since it gets very complicated when it comes to generics. +macro_rules! impl_display { + () => { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", hex_encode(self.serialize().to_vec())) + } + }; +} + +/// Contains the functions required for a `std::str::FromStr` implementation. +/// +/// Does not include the `Impl` section since it gets very complicated when it comes to generics. +macro_rules! impl_from_str { + () => { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.starts_with("0x") { + let bytes = hex::decode(&s[2..]).map_err(|e| e.to_string())?; + Self::deserialize(&bytes[..]).map_err(|e| format!("{:?}", e)) + } else { + Err("must start with 0x".to_string()) + } + } + }; +} + /// Contains the functions required for a `serde::Serialize` implementation. /// /// Does not include the `Impl` section since it gets very complicated when it comes to generics. @@ -85,7 +114,7 @@ macro_rules! 
impl_serde_serialize { where S: Serializer, { - serializer.serialize_str(&hex_encode(self.serialize().to_vec())) + serializer.serialize_str(&self.to_string()) } }; } @@ -99,9 +128,25 @@ macro_rules! impl_serde_deserialize { where D: Deserializer<'de>, { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::deserialize(&bytes[..]) - .map_err(|e| serde::de::Error::custom(format!("invalid pubkey ({:?})", e))) + pub struct StringVisitor; + + impl<'de> serde::de::Visitor<'de> for StringVisitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a hex string with 0x prefix") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + Ok(value.to_string()) + } + } + + let string = deserializer.deserialize_str(StringVisitor)?; + <Self as std::str::FromStr>::from_str(&string).map_err(serde::de::Error::custom) } }; } diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index a48f24f3f04..ae23936369e 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -15,6 +15,6 @@ url = "2.1.1" serde = "1.0.110" futures = "0.3.5" genesis = { path = "../../beacon_node/genesis" } -remote_beacon_node = { path = "../../common/remote_beacon_node" } +eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index b1a74b64a73..e2391c0f880 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -4,7 +4,12 @@ use beacon_node::ProductionBeaconNode; use environment::RuntimeContext; +use eth2::{ + reqwest::{ClientBuilder, Url}, + BeaconNodeHttpClient, +}; use std::path::PathBuf; +use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempdir::TempDir; use 
types::EthSpec; @@ -13,9 +18,12 @@ use validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; pub use environment; -pub use remote_beacon_node::RemoteBeaconNode; +pub use eth2; pub use validator_client::Config as ValidatorConfig; +/// The global timeout for HTTP requests to the beacon node. +const HTTP_TIMEOUT: Duration = Duration::from_secs(4); + /// Provides a beacon node that is running in the current process on a given tokio executor (it /// is _local_ to this process). /// @@ -52,16 +60,23 @@ impl<E: EthSpec> LocalBeaconNode<E> { impl<E: EthSpec> LocalBeaconNode<E> { /// Returns a `RemoteBeaconNode` that can connect to `self`. Useful for testing the node as if /// it were external this process. - pub fn remote_node(&self) -> Result<RemoteBeaconNode<E>, String> { - let socket_addr = self + pub fn remote_node(&self) -> Result<BeaconNodeHttpClient, String> { + let listen_addr = self .client - .http_listen_addr() + .http_api_listen_addr() .ok_or_else(|| "A remote beacon node must have a http server".to_string())?; - Ok(RemoteBeaconNode::new(format!( - "http://{}:{}", - socket_addr.ip(), - socket_addr.port() - ))?) + + let beacon_node_url: Url = format!("http://{}:{}", listen_addr.ip(), listen_addr.port()) + .parse() + .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; + let beacon_node_http_client = ClientBuilder::new() + .timeout(HTTP_TIMEOUT) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; + Ok(BeaconNodeHttpClient::from_components( + beacon_node_url, + beacon_node_http_client, + )) } } @@ -71,8 +86,8 @@ pub fn testing_client_config() -> ClientConfig { // Setting ports to `0` means that the OS will choose some available port. 
client_config.network.libp2p_port = 0; client_config.network.discovery_port = 0; - client_config.rest_api.enabled = true; - client_config.rest_api.port = 0; + client_config.http_api.enabled = true; + client_config.http_api.listen_port = 0; client_config.websocket_server.enabled = true; client_config.websocket_server.port = 0; diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 43ceaa14fdb..e755c9005fa 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,4 +1,5 @@ use crate::local_network::LocalNetwork; +use node_test_rig::eth2::types::StateId; use std::time::Duration; use types::{Epoch, EthSpec, Slot, Unsigned}; @@ -65,11 +66,9 @@ pub async fn verify_all_finalized_at<E: EthSpec>( for remote_node in network.remote_nodes()? { epochs.push( remote_node - .http - .beacon() - .get_head() + .get_beacon_states_finality_checkpoints(StateId::Head) .await - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|body| body.unwrap().data.finalized.epoch) .map_err(|e| format!("Get head via http failed: {:?}", e))?, ); } @@ -95,17 +94,10 @@ async fn verify_validator_count<E: EthSpec>( let validator_counts = { let mut validator_counts = Vec::new(); for remote_node in network.remote_nodes()? { - let beacon = remote_node.http.beacon(); - - let head = beacon - .get_head() - .await - .map_err(|e| format!("Get head via http failed: {:?}", e))?; - - let vc = beacon - .get_state_by_root(head.state_root) + let vc = remote_node + .get_debug_beacon_states::<E>(StateId::Head) .await - .map(|(state, _root)| state) + .map(|body| body.unwrap().data) .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
.validators .len(); diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index de78aaa0578..1ce8b2a5d9b 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -34,7 +34,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speed_up_factor") .takes_value(true) - .default_value("4") + .default_value("3") .help("Speed up factor")) .arg(Arg::with_name("continue_after_checks") .short("c") @@ -62,7 +62,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speed_up_factor") .takes_value(true) - .default_value("4") + .default_value("3") .help("Speed up factor")) .arg(Arg::with_name("continue_after_checks") .short("c") diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 37ce3ab5664..0dd9b3424b8 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,6 +1,7 @@ use node_test_rig::{ - environment::RuntimeContext, ClientConfig, LocalBeaconNode, LocalValidatorClient, - RemoteBeaconNode, ValidatorConfig, ValidatorFiles, + environment::RuntimeContext, + eth2::{types::StateId, BeaconNodeHttpClient}, + ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use std::ops::Deref; @@ -123,7 +124,7 @@ impl<E: EthSpec> LocalNetwork<E> { .ok_or_else(|| format!("No beacon node for index {}", beacon_node))?; beacon_node .client - .http_listen_addr() + .http_api_listen_addr() .expect("Must have http started") }; @@ -140,7 +141,7 @@ impl<E: EthSpec> LocalNetwork<E> { } /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. 
- pub fn remote_nodes(&self) -> Result<Vec<RemoteBeaconNode<E>>, String> { + pub fn remote_nodes(&self) -> Result<Vec<BeaconNodeHttpClient>, String> { let beacon_nodes = self.beacon_nodes.read(); beacon_nodes @@ -154,11 +155,9 @@ impl<E: EthSpec> LocalNetwork<E> { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); bootnode - .http - .beacon() - .get_head() + .get_beacon_states_finality_checkpoints(StateId::Head) .await .map_err(|e| format!("Cannot get head: {:?}", e)) - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|body| body.unwrap().data.finalized.epoch) } } diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 7583a6eab20..47272f62681 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -350,11 +350,9 @@ pub async fn check_still_syncing<E: EthSpec>(network: &LocalNetwork<E>) -> Resul for remote_node in network.remote_nodes()? 
{ status.push( remote_node - .http - .node() - .syncing_status() + .get_node_syncing() .await - .map(|status| status.is_syncing) + .map(|body| body.data.is_syncing) .map_err(|e| format!("Get syncing status via http failed: {:?}", e))?, ) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 77a6e5ce97e..b69b31f574c 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -19,7 +19,6 @@ clap = "2.33.0" eth2_interop_keypairs = { path = "../common/eth2_interop_keypairs" } slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } -rest_types = { path = "../common/rest_types" } types = { path = "../consensus/types" } serde = "1.0.110" serde_derive = "1.0.110" @@ -41,7 +40,7 @@ eth2_ssz_derive = "0.1.0" hex = "0.4.2" deposit_contract = { path = "../common/deposit_contract" } bls = { path = "../crypto/bls" } -remote_beacon_node = { path = "../common/remote_beacon_node" } +eth2 = { path = "../common/eth2" } tempdir = "0.3.7" rayon = "1.3.0" validator_dir = { path = "../common/validator_dir" } diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index fa79877774b..d675ebda2e8 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -3,22 +3,26 @@ use crate::{ validator_store::ValidatorStore, }; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use futures::StreamExt; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use slog::{crit, debug, error, info, trace}; +use slog::{crit, error, info, trace}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use tokio::time::{delay_until, interval_at, Duration, Instant}; -use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot, SubnetId}; +use tree_hash::TreeHash; +use types::{ + AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, 
CommitteeIndex, EthSpec, + Slot, +}; /// Builds an `AttestationService`. pub struct AttestationServiceBuilder<T, E: EthSpec> { duties_service: Option<DutiesService<T, E>>, validator_store: Option<ValidatorStore<T, E>>, slot_clock: Option<T>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, } @@ -48,7 +52,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -86,7 +90,7 @@ pub struct Inner<T, E: EthSpec> { duties_service: DutiesService<T, E>, validator_store: ValidatorStore<T, E>, slot_clock: T, - beacon_node: RemoteBeaconNode<E>, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, } @@ -262,7 +266,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { // Step 2. // // If an attestation was produced, make an aggregate. - if let Some(attestation) = attestation_opt { + if let Some(attestation_data) = attestation_opt { // First, wait until the `aggregation_production_instant` (2/3rds // of the way though the slot). As verified in the // `delay_triggers_when_in_the_past` test, this code will still run @@ -272,7 +276,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { // Then download, sign and publish a `SignedAggregateAndProof` for each // validator that is elected to aggregate for this `slot` and // `committee_index`. 
- self.produce_and_publish_aggregates(attestation, &validator_duties) + self.produce_and_publish_aggregates(attestation_data, &validator_duties) .await .map_err(move |e| { crit!( @@ -305,7 +309,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { slot: Slot, committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], - ) -> Result<Option<Attestation<E>>, String> { + ) -> Result<Option<AttestationData>, String> { let log = self.context.log(); if validator_duties.is_empty() { @@ -318,124 +322,88 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { .ok_or_else(|| "Unable to determine current slot from clock".to_string())? .epoch(E::slots_per_epoch()); - let attestation = self + let attestation_data = self .beacon_node - .http - .validator() - .produce_attestation(slot, committee_index) + .get_validator_attestation_data(slot, committee_index) .await - .map_err(|e| format!("Failed to produce attestation: {:?}", e))?; + .map_err(|e| format!("Failed to produce attestation data: {:?}", e))? + .data; + + for duty in validator_duties { + // Ensure that all required fields are present in the validator duty. + let ( + duty_slot, + duty_committee_index, + validator_committee_position, + _, + _, + committee_length, + ) = if let Some(tuple) = duty.attestation_duties() { + tuple + } else { + crit!( + log, + "Missing validator duties when signing"; + "duties" => format!("{:?}", duty) + ); + continue; + }; - // For each validator in `validator_duties`, clone the `attestation` and add - // their signature. - // - // If any validator is unable to sign, they are simply skipped. - let signed_attestations = validator_duties - .iter() - .filter_map(|duty| { - // Ensure that all required fields are present in the validator duty. 
- let ( - duty_slot, - duty_committee_index, - validator_committee_position, - _, - committee_count_at_slot, - ) = if let Some(tuple) = duty.attestation_duties() { - tuple - } else { - crit!( - log, - "Missing validator duties when signing"; - "duties" => format!("{:?}", duty) - ); - return None; - }; + // Ensure that the attestation matches the duties. + if duty_slot != attestation_data.slot || duty_committee_index != attestation_data.index + { + crit!( + log, + "Inconsistent validator duties during signing"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "duty_slot" => duty_slot, + "attestation_slot" => attestation_data.slot, + "duty_index" => duty_committee_index, + "attestation_index" => attestation_data.index, + ); + continue; + } - // Ensure that the attestation matches the duties. - if duty_slot != attestation.data.slot - || duty_committee_index != attestation.data.index - { - crit!( - log, - "Inconsistent validator duties during signing"; - "validator" => format!("{:?}", duty.validator_pubkey()), - "duty_slot" => duty_slot, - "attestation_slot" => attestation.data.slot, - "duty_index" => duty_committee_index, - "attestation_index" => attestation.data.index, - ); - return None; - } + let mut attestation = Attestation { + aggregation_bits: BitList::with_capacity(committee_length as usize).unwrap(), + data: attestation_data.clone(), + signature: AggregateSignature::infinity(), + }; - let mut attestation = attestation.clone(); - let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>( - &attestation.data, - committee_count_at_slot, - &self.context.eth2_config().spec, + self.validator_store + .sign_attestation( + duty.validator_pubkey(), + validator_committee_position, + &mut attestation, + current_epoch, ) - .map_err(|e| { - error!( - log, - "Failed to compute subnet id to publish attestation: {:?}", e - ) - }) - .ok()?; - self.validator_store - .sign_attestation( - duty.validator_pubkey(), - validator_committee_position, - &mut 
attestation, - current_epoch, - ) - .map(|_| (attestation, subnet_id)) - }) - .collect::<Vec<_>>(); - - // If there are any signed attestations, publish them to the BN. Otherwise, - // just return early. - if let Some(attestation) = signed_attestations.first().cloned() { - let num_attestations = signed_attestations.len(); - let beacon_block_root = attestation.0.data.beacon_block_root; - - self.beacon_node - .http - .validator() - .publish_attestations(signed_attestations) + .ok_or_else(|| "Failed to sign attestation".to_string())?; + + match self + .beacon_node + .post_beacon_pool_attestations(&attestation) .await - .map_err(|e| format!("Failed to publish attestation: {:?}", e)) - .map(move |publish_status| match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published attestations"; - "count" => num_attestations, - "head_block" => format!("{:?}", beacon_block_root), - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published attestation was invalid"; - "message" => msg, - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Unknown => { - crit!(log, "Unknown condition when publishing unagg. 
attestation") - } - }) - .map(|()| Some(attestation.0)) - } else { - debug!( - log, - "No attestations to publish"; - "committee_index" => committee_index, - "slot" => slot.as_u64(), - ); - - Ok(None) + { + Ok(()) => info!( + log, + "Successfully published attestation"; + "head_block" => format!("{:?}", attestation.data.beacon_block_root), + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + "type" => "unaggregated", + ), + Err(e) => error!( + log, + "Unable to publish attestation"; + "error" => e.to_string(), + "committee_index" => attestation.data.index, + "slot" => slot.as_u64(), + "type" => "unaggregated", + ), + } } + + Ok(Some(attestation_data)) } /// Performs the second step of the attesting process: downloading an aggregated `Attestation`, @@ -453,103 +421,89 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { /// returned to the BN. async fn produce_and_publish_aggregates( &self, - attestation: Attestation<E>, + attestation_data: AttestationData, validator_duties: &[DutyAndProof], ) -> Result<(), String> { let log = self.context.log(); let aggregated_attestation = self .beacon_node - .http - .validator() - .produce_aggregate_attestation(&attestation.data) + .get_validator_aggregate_attestation( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) .await - .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))?; - - // For each validator, clone the `aggregated_attestation` and convert it into - // a `SignedAggregateAndProof` - let signed_aggregate_and_proofs = validator_duties - .iter() - .filter_map(|duty_and_proof| { - // Do not produce a signed aggregator for validators that are not + .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))? + .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))? 
+ .data; + + for duty_and_proof in validator_duties { + let selection_proof = if let Some(proof) = duty_and_proof.selection_proof.as_ref() { + proof + } else { + // Do not produce a signed aggregate for validators that are not // subscribed aggregators. - let selection_proof = duty_and_proof.selection_proof.as_ref()?.clone(); - - let (duty_slot, duty_committee_index, _, validator_index, _) = - duty_and_proof.attestation_duties().or_else(|| { - crit!(log, "Missing duties when signing aggregate"); - None - })?; + continue; + }; + let (duty_slot, duty_committee_index, _, validator_index, _, _) = + if let Some(tuple) = duty_and_proof.attestation_duties() { + tuple + } else { + crit!(log, "Missing duties when signing aggregate"); + continue; + }; - let pubkey = &duty_and_proof.duty.validator_pubkey; - let slot = attestation.data.slot; - let committee_index = attestation.data.index; + let pubkey = &duty_and_proof.duty.validator_pubkey; + let slot = attestation_data.slot; + let committee_index = attestation_data.index; - if duty_slot != slot || duty_committee_index != committee_index { - crit!(log, "Inconsistent validator duties during signing"); - return None; - } + if duty_slot != slot || duty_committee_index != committee_index { + crit!(log, "Inconsistent validator duties during signing"); + continue; + } - if let Some(signed_aggregate_and_proof) = - self.validator_store.produce_signed_aggregate_and_proof( - pubkey, - validator_index, - aggregated_attestation.clone(), - selection_proof, - ) - { - Some(signed_aggregate_and_proof) - } else { - crit!(log, "Failed to sign attestation"); - None - } - }) - .collect::<Vec<_>>(); + let signed_aggregate_and_proof = if let Some(aggregate) = + self.validator_store.produce_signed_aggregate_and_proof( + pubkey, + validator_index, + aggregated_attestation.clone(), + selection_proof.clone(), + ) { + aggregate + } else { + crit!(log, "Failed to sign attestation"); + continue; + }; - // If there any signed aggregates and proofs were 
produced, publish them to the - // BN. - if let Some(first) = signed_aggregate_and_proofs.first().cloned() { - let attestation = first.message.aggregate; + let attestation = &signed_aggregate_and_proof.message.aggregate; - let publish_status = self + match self .beacon_node - .http - .validator() - .publish_aggregate_and_proof(signed_aggregate_and_proofs) + .post_validator_aggregate_and_proof(&signed_aggregate_and_proof) .await - .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e))?; - match publish_status { - PublishStatus::Valid => info!( + { + Ok(()) => info!( log, - "Successfully published attestations"; + "Successfully published attestation"; + "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "signatures" => attestation.aggregation_bits.num_set_bits(), "head_block" => format!("{:?}", attestation.data.beacon_block_root), "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", ), - PublishStatus::Invalid(msg) => crit!( + Err(e) => crit!( log, - "Published attestation was invalid"; - "message" => msg, + "Failed to publish attestation"; + "error" => e.to_string(), "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", ), - PublishStatus::Unknown => { - crit!(log, "Unknown condition when publishing agg. 
attestation") - } - }; - Ok(()) - } else { - debug!( - log, - "No signed aggregates to publish"; - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), - ); - Ok(()) + } } + + Ok(()) } } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 60d1f4d5514..bf52cacfc0b 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,19 +1,19 @@ use crate::validator_store::ValidatorStore; use environment::RuntimeContext; +use eth2::{types::Graffiti, BeaconNodeHttpClient}; use futures::channel::mpsc::Receiver; use futures::{StreamExt, TryFutureExt}; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use types::{EthSpec, Graffiti, PublicKey, Slot}; +use types::{EthSpec, PublicKey, Slot}; /// Builds a `BlockService`. pub struct BlockServiceBuilder<T, E: EthSpec> { validator_store: Option<ValidatorStore<T, E>>, slot_clock: Option<Arc<T>>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, graffiti: Option<Graffiti>, } @@ -39,7 +39,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -79,7 +79,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { pub struct Inner<T, E: EthSpec> { validator_store: ValidatorStore<T, E>, slot_clock: Arc<T>, - beacon_node: RemoteBeaconNode<E>, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, graffiti: Option<Graffiti>, } @@ -221,41 +221,28 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let block = self .beacon_node - .http - .validator() - 
.produce_block(slot, randao_reveal, self.graffiti) + .get_validator_blocks(slot, randao_reveal.into(), self.graffiti.as_ref()) .await - .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))?; + .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))? + .data; let signed_block = self .validator_store .sign_block(&validator_pubkey, block, current_slot) .ok_or_else(|| "Unable to sign block".to_string())?; - let publish_status = self - .beacon_node - .http - .validator() - .publish_block(signed_block.clone()) + self.beacon_node + .post_beacon_blocks(&signed_block) .await .map_err(|e| format!("Error from beacon node when publishing block: {:?}", e))?; - match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published block"; - "deposits" => signed_block.message.body.deposits.len(), - "attestations" => signed_block.message.body.attestations.len(), - "slot" => signed_block.slot().as_u64(), - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published block was invalid"; - "message" => msg, - "slot" => signed_block.slot().as_u64(), - ), - PublishStatus::Unknown => crit!(log, "Unknown condition when publishing block"), - } + info!( + log, + "Successfully published block"; + "deposits" => signed_block.message.body.deposits.len(), + "attestations" => signed_block.message.body.attestations.len(), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 991b5516220..4d230b1b445 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -4,9 +4,10 @@ use directory::{ get_testnet_name, DEFAULT_HARDCODED_TESTNET, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, }; +use eth2::types::Graffiti; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; -use types::{Graffiti, GRAFFITI_BYTES_LEN}; +use types::GRAFFITI_BYTES_LEN; pub const DEFAULT_HTTP_SERVER: &str = 
"http://localhost:5052/"; /// Path to the slashing protection database within the datadir. @@ -119,15 +120,14 @@ impl Config { GRAFFITI_BYTES_LEN )); } else { - // Default graffiti to all 0 bytes. - let mut graffiti = Graffiti::default(); + let mut graffiti = [0; 32]; // Copy the provided bytes over. // // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`. graffiti[..graffiti_bytes.len()].copy_from_slice(&graffiti_bytes); - config.graffiti = Some(graffiti); + config.graffiti = Some(graffiti.into()); } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 7375d550235..7f6d33fe85b 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -1,16 +1,15 @@ use crate::{ - block_service::BlockServiceNotification, is_synced::is_synced, validator_store::ValidatorStore, + block_service::BlockServiceNotification, is_synced::is_synced, validator_duty::ValidatorDuty, + validator_store::ValidatorStore, }; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use futures::channel::mpsc::Sender; use futures::{SinkExt, StreamExt}; use parking_lot::RwLock; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription}; use slog::{debug, error, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; -use std::convert::TryInto; use std::ops::Deref; use std::sync::Arc; use tokio::time::{interval_at, Duration, Instant}; @@ -44,14 +43,14 @@ impl DutyAndProof { pub fn compute_selection_proof<T: SlotClock + 'static, E: EthSpec>( &mut self, validator_store: &ValidatorStore<T, E>, + spec: &ChainSpec, ) -> Result<(), String> { - let (modulo, slot) = if let (Some(modulo), Some(slot)) = - (self.duty.aggregator_modulo, self.duty.attestation_slot) + let (committee_length, slot) = if let (Some(count), Some(slot)) = + (self.duty.committee_length, self.duty.attestation_slot) { - (modulo, slot) + 
(count as usize, slot) } else { - // If there is no modulo or for the aggregator we assume they are not activated and - // therefore not an aggregator. + // If there are no attester duties we assume the validator is inactive. self.selection_proof = None; return Ok(()); }; @@ -61,7 +60,7 @@ impl DutyAndProof { .ok_or_else(|| "Failed to produce selection proof".to_string())?; self.selection_proof = selection_proof - .is_aggregator_from_modulo(modulo) + .is_aggregator(committee_length, spec) .map_err(|e| format!("Invalid modulo: {:?}", e)) .map(|is_aggregator| { if is_aggregator { @@ -87,19 +86,20 @@ impl DutyAndProof { /// It's important to note that this doesn't actually check `self.selection_proof`, instead it /// checks to see if the inputs to computing the selection proof are equal. fn selection_proof_eq(&self, other: &Self) -> bool { - self.duty.aggregator_modulo == other.duty.aggregator_modulo + self.duty.committee_count_at_slot == other.duty.committee_count_at_slot && self.duty.attestation_slot == other.duty.attestation_slot } /// Returns the information required for an attesting validator, if they are scheduled to /// attest. 
- pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64)> { + pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64, u64)> { Some(( self.duty.attestation_slot?, self.duty.attestation_committee_index?, self.duty.attestation_committee_position?, self.duty.validator_index?, self.duty.committee_count_at_slot?, + self.duty.committee_length?, )) } @@ -108,26 +108,12 @@ impl DutyAndProof { } } -impl TryInto<DutyAndProof> for ValidatorDutyBytes { - type Error = String; - - fn try_into(self) -> Result<DutyAndProof, Self::Error> { - let duty = ValidatorDuty { - validator_pubkey: (&self.validator_pubkey) - .try_into() - .map_err(|e| format!("Invalid pubkey bytes from server: {:?}", e))?, - validator_index: self.validator_index, - attestation_slot: self.attestation_slot, - attestation_committee_index: self.attestation_committee_index, - attestation_committee_position: self.attestation_committee_position, - committee_count_at_slot: self.committee_count_at_slot, - block_proposal_slots: self.block_proposal_slots, - aggregator_modulo: self.aggregator_modulo, - }; - Ok(DutyAndProof { - duty, +impl Into<DutyAndProof> for ValidatorDuty { + fn into(self) -> DutyAndProof { + DutyAndProof { + duty: self, selection_proof: None, - }) + } } } @@ -260,6 +246,7 @@ impl DutiesStore { mut duties: DutyAndProof, slots_per_epoch: u64, validator_store: &ValidatorStore<T, E>, + spec: &ChainSpec, ) -> Result<InsertOutcome, String> { let mut store = self.store.write(); @@ -282,7 +269,7 @@ impl DutiesStore { } } else { // Compute the selection proof. - duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; // Determine if a re-subscription is required. let should_resubscribe = !duties.subscription_eq(known_duties); @@ -294,7 +281,7 @@ impl DutiesStore { } } else { // Compute the selection proof. 
- duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; validator_map.insert(epoch, duties); @@ -302,7 +289,7 @@ impl DutiesStore { } } else { // Compute the selection proof. - duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; let validator_pubkey = duties.duty.validator_pubkey.clone(); @@ -328,7 +315,7 @@ impl DutiesStore { pub struct DutiesServiceBuilder<T, E: EthSpec> { validator_store: Option<ValidatorStore<T, E>>, slot_clock: Option<T>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, allow_unsynced_beacon_node: bool, } @@ -354,7 +341,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -397,7 +384,7 @@ pub struct Inner<T, E: EthSpec> { store: Arc<DutiesStore>, validator_store: ValidatorStore<T, E>, pub(crate) slot_clock: T, - pub(crate) beacon_node: RemoteBeaconNode<E>, + pub(crate) beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, /// If true, the duties service will poll for duties from the beacon node even if it is not /// synced. @@ -462,7 +449,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { pub fn start_update_service( self, mut block_service_tx: Sender<BlockServiceNotification>, - spec: &ChainSpec, + spec: Arc<ChainSpec>, ) -> Result<(), String> { let duration_to_next_slot = self .slot_clock @@ -481,17 +468,22 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { // Run an immediate update before starting the updater service. 
let duties_service = self.clone(); let mut block_service_tx_clone = block_service_tx.clone(); + let inner_spec = spec.clone(); self.inner .context .executor .runtime_handle() - .spawn(async move { duties_service.do_update(&mut block_service_tx_clone).await }); + .spawn(async move { + duties_service + .do_update(&mut block_service_tx_clone, &inner_spec) + .await + }); let executor = self.inner.context.executor.clone(); let interval_fut = async move { while interval.next().await.is_some() { - self.clone().do_update(&mut block_service_tx).await; + self.clone().do_update(&mut block_service_tx, &spec).await; } }; @@ -501,7 +493,11 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { } /// Attempt to download the duties of all managed validators for this epoch and the next. - async fn do_update(self, block_service_tx: &mut Sender<BlockServiceNotification>) { + async fn do_update( + self, + block_service_tx: &mut Sender<BlockServiceNotification>, + spec: &ChainSpec, + ) { let log = self.context.log(); if !is_synced(&self.beacon_node, &self.slot_clock, None).await @@ -534,7 +530,11 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { // Update duties for the current epoch, but keep running if there's an error: // block production or the next epoch update could still succeed. - if let Err(e) = self.clone().update_epoch(current_epoch).await { + if let Err(e) = self + .clone() + .update_epoch(current_epoch, current_epoch, spec) + .await + { error!( log, "Failed to get current epoch duties"; @@ -558,7 +558,11 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { }; // Update duties for the next epoch. 
- if let Err(e) = self.clone().update_epoch(current_epoch + 1).await { + if let Err(e) = self + .clone() + .update_epoch(current_epoch, current_epoch + 1, spec) + .await + { error!( log, "Failed to get next epoch duties"; @@ -567,18 +571,15 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { } } - /// Attempt to download the duties of all managed validators for the given `epoch`. - async fn update_epoch(self, epoch: Epoch) -> Result<(), String> { - let pubkeys = self.validator_store.voting_pubkeys(); - let all_duties = self - .beacon_node - .http - .validator() - .get_duties(epoch, pubkeys.as_slice()) - .await - .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?; - - let log = self.context.log().clone(); + /// Attempt to download the duties of all managed validators for the given `request_epoch`. The + /// `current_epoch` should be a local reading of the slot clock. + async fn update_epoch( + self, + current_epoch: Epoch, + request_epoch: Epoch, + spec: &ChainSpec, + ) -> Result<(), String> { + let log = self.context.log(); let mut new_validator = 0; let mut new_epoch = 0; @@ -587,74 +588,76 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { let mut replaced = 0; let mut invalid = 0; - // For each of the duties, attempt to insert them into our local store and build a - // list of new or changed selections proofs for any aggregating validators. - let validator_subscriptions = all_duties - .into_iter() - .filter_map(|remote_duties| { - // Convert the remote duties into our local representation. - let duties: DutyAndProof = remote_duties - .clone() - .try_into() - .map_err(|e| { - error!( - log, - "Unable to convert remote duties"; - "error" => e - ) - }) - .ok()?; - - let validator_pubkey = duties.duty.validator_pubkey.clone(); - - // Attempt to update our local store. 
- let outcome = self - .store - .insert(epoch, duties, E::slots_per_epoch(), &self.validator_store) - .map_err(|e| { - error!( - log, - "Unable to store duties"; - "error" => e - ) - }) - .ok()?; - - match &outcome { - InsertOutcome::NewValidator => { - debug!( - log, - "First duty assignment for validator"; - "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), - "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), - "validator" => format!("{:?}", &remote_duties.validator_pubkey) - ); - new_validator += 1; + let mut validator_subscriptions = vec![]; + for pubkey in self.validator_store.voting_pubkeys() { + let remote_duties = match ValidatorDuty::download( + &self.beacon_node, + current_epoch, + request_epoch, + pubkey, + ) + .await + { + Ok(duties) => duties, + Err(e) => { + error!( + log, + "Failed to download validator duties"; + "error" => e + ); + continue; + } + }; + + // Convert the remote duties into our local representation. + let duties: DutyAndProof = remote_duties.clone().into(); + + let validator_pubkey = duties.duty.validator_pubkey.clone(); + + // Attempt to update our local store. + match self.store.insert( + request_epoch, + duties, + E::slots_per_epoch(), + &self.validator_store, + spec, + ) { + Ok(outcome) => { + match &outcome { + InsertOutcome::NewValidator => { + debug!( + log, + "First duty assignment for validator"; + "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), + "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), + "validator" => format!("{:?}", &remote_duties.validator_pubkey) + ); + new_validator += 1; + } + InsertOutcome::NewProposalSlots => new_proposal_slots += 1, + InsertOutcome::NewEpoch => new_epoch += 1, + InsertOutcome::Identical => identical += 1, + InsertOutcome::Replaced { .. 
} => replaced += 1, + InsertOutcome::Invalid => invalid += 1, + } + + if let Some(is_aggregator) = + self.store.is_aggregator(&validator_pubkey, request_epoch) + { + if outcome.is_subscription_candidate() { + if let Some(subscription) = remote_duties.subscription(is_aggregator) { + validator_subscriptions.push(subscription) + } + } } - InsertOutcome::NewProposalSlots => new_proposal_slots += 1, - InsertOutcome::NewEpoch => new_epoch += 1, - InsertOutcome::Identical => identical += 1, - InsertOutcome::Replaced { .. } => replaced += 1, - InsertOutcome::Invalid => invalid += 1, - }; - - // The selection proof is computed on `store.insert`, so it's necessary to check - // with the store that the validator is an aggregator. - let is_aggregator = self.store.is_aggregator(&validator_pubkey, epoch)?; - - if outcome.is_subscription_candidate() { - Some(ValidatorSubscription { - validator_index: remote_duties.validator_index?, - attestation_committee_index: remote_duties.attestation_committee_index?, - slot: remote_duties.attestation_slot?, - committee_count_at_slot: remote_duties.committee_count_at_slot?, - is_aggregator, - }) - } else { - None } - }) - .collect::<Vec<_>>(); + Err(e) => error!( + log, + "Unable to store duties"; + "error" => e + ), + } + } if invalid > 0 { error!( @@ -673,7 +676,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { "new_proposal_slots" => new_proposal_slots, "new_validator" => new_validator, "replaced" => replaced, - "epoch" => format!("{}", epoch) + "epoch" => format!("{}", request_epoch) ); if replaced > 0 { @@ -690,34 +693,19 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { if count == 0 { debug!(log, "No new subscriptions required"); - - Ok(()) } else { self.beacon_node - .http - .validator() - .subscribe(validator_subscriptions) + .post_validator_beacon_committee_subscriptions(&validator_subscriptions) .await - .map_err(|e| format!("Failed to subscribe validators: {:?}", e)) - .map(move |status| { - match 
status { - PublishStatus::Valid => debug!( - log, - "Successfully subscribed validators"; - "count" => count - ), - PublishStatus::Unknown => error!( - log, - "Unknown response from subscription"; - ), - PublishStatus::Invalid(e) => error!( - log, - "Failed to subscribe validator"; - "error" => e - ), - }; - }) + .map_err(|e| format!("Failed to subscribe validators: {:?}", e))?; + debug!( + log, + "Successfully subscribed validators"; + "count" => count + ); } + + Ok(()) } } diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index b8db7b72e3c..e38a4cf3c1b 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -1,7 +1,7 @@ use environment::RuntimeContext; +use eth2::{types::StateId, BeaconNodeHttpClient}; use futures::StreamExt; use parking_lot::RwLock; -use remote_beacon_node::RemoteBeaconNode; use slog::{debug, trace}; use slot_clock::SlotClock; use std::ops::Deref; @@ -16,7 +16,7 @@ const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80); pub struct ForkServiceBuilder<T, E: EthSpec> { fork: Option<Fork>, slot_clock: Option<T>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, } @@ -35,7 +35,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -66,7 +66,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> { /// Helper to minimise `Arc` usage. 
pub struct Inner<T, E: EthSpec> { fork: RwLock<Option<Fork>>, - beacon_node: RemoteBeaconNode<E>, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, slot_clock: T, } @@ -141,9 +141,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> { let fork = self .inner .beacon_node - .http - .beacon() - .get_fork() + .get_beacon_states_fork(StateId::Head) .await .map_err(|e| { trace!( @@ -151,7 +149,15 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> { "Fork update failed"; "error" => format!("Error retrieving fork: {:?}", e) ) - })?; + })? + .ok_or_else(|| { + trace!( + log, + "Fork update failed"; + "error" => "The beacon head fork is unknown" + ) + })? + .data; if self.fork.read().as_ref() != Some(&fork) { *(self.fork.write()) = Some(fork); diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 400768f5cb4..a097d72456e 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -50,8 +50,6 @@ pub enum Error { UnableToSaveDefinitions(validator_definitions::Error), /// It is not legal to try and initialize a disabled validator definition. UnableToInitializeDisabledValidator, - /// It is not legal to try and initialize a disabled validator definition. - PasswordUnknown(PathBuf), /// There was an error reading from stdin. UnableToReadPasswordFromUser(String), /// There was an error running a tokio async task. @@ -333,6 +331,7 @@ impl InitializedValidators { /// validator will be removed from `self.validators`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + #[allow(dead_code)] // Will be used once VC API is enabled. 
pub async fn set_validator_status( &mut self, voting_public_key: &PublicKey, diff --git a/validator_client/src/is_synced.rs b/validator_client/src/is_synced.rs index e1017ac7719..f967d629c10 100644 --- a/validator_client/src/is_synced.rs +++ b/validator_client/src/is_synced.rs @@ -1,8 +1,6 @@ -use remote_beacon_node::RemoteBeaconNode; -use rest_types::SyncingResponse; -use slog::{debug, error, Logger}; +use eth2::BeaconNodeHttpClient; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; -use types::EthSpec; /// A distance in slots. const SYNC_TOLERANCE: u64 = 4; @@ -17,19 +15,19 @@ const SYNC_TOLERANCE: u64 = 4; /// /// The second condition means the even if the beacon node thinks that it's syncing, we'll still /// try to use it if it's close enough to the head. -pub async fn is_synced<T: SlotClock, E: EthSpec>( - beacon_node: &RemoteBeaconNode<E>, +pub async fn is_synced<T: SlotClock>( + beacon_node: &BeaconNodeHttpClient, slot_clock: &T, log_opt: Option<&Logger>, ) -> bool { - let resp = match beacon_node.http.node().syncing_status().await { + let resp = match beacon_node.get_node_syncing().await { Ok(resp) => resp, Err(e) => { if let Some(log) = log_opt { error!( log, "Unable connect to beacon node"; - "error" => format!("{:?}", e) + "error" => e.to_string() ) } @@ -37,44 +35,38 @@ pub async fn is_synced<T: SlotClock, E: EthSpec>( } }; - match &resp { - SyncingResponse { - is_syncing: false, .. 
- } => true, - SyncingResponse { - is_syncing: true, - sync_status, - } => { - if let Some(log) = log_opt { - debug!( - log, - "Beacon node sync status"; - "status" => format!("{:?}", resp), - ); - } + let is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + + if let Some(log) = log_opt { + if !is_synced { + debug!( + log, + "Beacon node sync status"; + "status" => format!("{:?}", resp), + ); - let now = if let Some(slot) = slot_clock.now() { - slot - } else { - // There's no good reason why we shouldn't be able to read the slot clock, so we'll - // indicate we're not synced if that's the case. - return false; - }; + warn!( + log, + "Beacon node is syncing"; + "msg" => "not receiving new duties", + "sync_distance" => resp.data.sync_distance.as_u64(), + "head_slot" => resp.data.head_slot.as_u64(), + ); + } - if sync_status.current_slot + SYNC_TOLERANCE >= now { - true - } else { - if let Some(log) = log_opt { - error!( - log, - "Beacon node is syncing"; - "msg" => "not receiving new duties", - "target_slot" => sync_status.highest_slot.as_u64(), - "current_slot" => sync_status.current_slot.as_u64(), - ); - } - false + if let Some(local_slot) = slot_clock.now() { + let remote_slot = resp.data.head_slot + resp.data.sync_distance; + if remote_slot + 1 < local_slot || local_slot + 1 < remote_slot { + error!( + log, + "Time discrepancy with beacon node"; + "msg" => "check the system time on this host and the beacon node", + "beacon_node_slot" => remote_slot, + "local_slot" => local_slot, + ); } } } + + is_synced } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6d82baa6bfb..8a0e8ba1edb 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -7,6 +7,7 @@ mod fork_service; mod initialized_validators; mod is_synced; mod notifier; +mod validator_duty; mod validator_store; pub use cli::cli_app; @@ -18,18 +19,18 @@ use block_service::{BlockService, BlockServiceBuilder}; use 
clap::ArgMatches; use duties_service::{DutiesService, DutiesServiceBuilder}; use environment::RuntimeContext; -use eth2_config::Eth2Config; +use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Url}; use fork_service::{ForkService, ForkServiceBuilder}; use futures::channel::mpsc; use initialized_validators::InitializedValidators; use notifier::spawn_notifier; -use remote_beacon_node::RemoteBeaconNode; use slog::{error, info, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; +use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{delay_for, Duration}; -use types::{EthSpec, Hash256}; +use types::{EthSpec, Hash256, YamlConfig}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. @@ -61,7 +62,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { /// Instantiates the validator client, _without_ starting the timers to trigger block /// and attestation production. - pub async fn new(mut context: RuntimeContext<T>, config: Config) -> Result<Self, String> { + pub async fn new(context: RuntimeContext<T>, config: Config) -> Result<Self, String> { let log = context.log().clone(); info!( @@ -104,33 +105,36 @@ impl<T: EthSpec> ProductionValidatorClient<T> { "enabled" => validators.num_enabled(), ); + let beacon_node_url: Url = config + .http_server + .parse() + .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; + let beacon_node_http_client = ClientBuilder::new() + .timeout(HTTP_TIMEOUT) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; let beacon_node = - RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) - .map_err(|e| format!("Unable to init beacon node http client: {}", e))?; + BeaconNodeHttpClient::from_components(beacon_node_url, beacon_node_http_client); // Perform some potentially long-running initialization tasks. 
- let (eth2_config, genesis_time, genesis_validators_root) = tokio::select! { + let (yaml_config, genesis_time, genesis_validators_root) = tokio::select! { tuple = init_from_beacon_node(&beacon_node, &context) => tuple?, () = context.executor.exit() => return Err("Shutting down".to_string()) }; - - // Do not permit a connection to a beacon node using different spec constants. - if context.eth2_config.spec_constants != eth2_config.spec_constants { - return Err(format!( - "Beacon node is using an incompatible spec. Got {}, expected {}", - eth2_config.spec_constants, context.eth2_config.spec_constants - )); + let beacon_node_spec = yaml_config.apply_to_chain_spec::<T>(&T::default_spec()) + .ok_or_else(|| + "The minimal/mainnet spec type of the beacon node does not match the validator client. \ + See the --testnet command.".to_string() + )?; + + if context.eth2_config.spec != beacon_node_spec { + return Err( + "The beacon node is using a different Eth2 specification to this validator client. \ + See the --testnet command." + .to_string(), + ); } - // Note: here we just assume the spec variables of the remote node. This is very useful - // for testnets, but perhaps a security issue when it comes to mainnet. - // - // A damaging attack would be for a beacon node to convince the validator client of a - // different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being - // produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant - // for Lighthouse. 
- context.eth2_config = eth2_config; - let slot_clock = SystemTimeSlotClock::new( context.eth2_config.spec.genesis_slot, Duration::from_secs(genesis_time), @@ -203,7 +207,10 @@ impl<T: EthSpec> ProductionValidatorClient<T> { self.duties_service .clone() - .start_update_service(block_service_tx, &self.context.eth2_config.spec) + .start_update_service( + block_service_tx, + Arc::new(self.context.eth2_config.spec.clone()), + ) .map_err(|e| format!("Unable to start duties service: {}", e))?; self.fork_service @@ -228,80 +235,85 @@ impl<T: EthSpec> ProductionValidatorClient<T> { } async fn init_from_beacon_node<E: EthSpec>( - beacon_node: &RemoteBeaconNode<E>, + beacon_node: &BeaconNodeHttpClient, context: &RuntimeContext<E>, -) -> Result<(Eth2Config, u64, Hash256), String> { +) -> Result<(YamlConfig, u64, Hash256), String> { // Wait for the beacon node to come online. wait_for_node(beacon_node, context.log()).await?; - let eth2_config = beacon_node - .http - .spec() - .get_eth2_config() + let yaml_config = beacon_node + .get_config_spec() .await - .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; - let genesis_time = beacon_node - .http - .beacon() - .get_genesis_time() - .await - .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; + .map_err(|e| format!("Unable to read spec from beacon node: {:?}", e))? + .data; + + let genesis = loop { + match beacon_node.get_beacon_genesis().await { + Ok(genesis) => break genesis.data, + Err(e) => { + // A 404 error on the genesis endpoint indicates that genesis has not yet occurred. 
+ if e.status() == Some(StatusCode::NOT_FOUND) { + info!( + context.log(), + "Waiting for genesis"; + ); + } else { + error!( + context.log(), + "Error polling beacon node"; + "error" => format!("{:?}", e) + ); + } + } + } + + delay_for(RETRY_DELAY).await; + }; + let now = SystemTime::now() .duration_since(UNIX_EPOCH) .map_err(|e| format!("Unable to read system time: {:?}", e))?; - let genesis = Duration::from_secs(genesis_time); + let genesis_time = Duration::from_secs(genesis.genesis_time); // If the time now is less than (prior to) genesis, then delay until the // genesis instant. // // If the validator client starts before genesis, it will get errors from // the slot clock. - if now < genesis { + if now < genesis_time { info!( context.log(), "Starting node prior to genesis"; - "seconds_to_wait" => (genesis - now).as_secs() + "seconds_to_wait" => (genesis_time - now).as_secs() ); - delay_for(genesis - now).await; + delay_for(genesis_time - now).await; } else { info!( context.log(), "Genesis has already occurred"; - "seconds_ago" => (now - genesis).as_secs() + "seconds_ago" => (now - genesis_time).as_secs() ); } - let genesis_validators_root = beacon_node - .http - .beacon() - .get_genesis_validators_root() - .await - .map_err(|e| { - format!( - "Unable to read genesis validators root from beacon node: {:?}", - e - ) - })?; - Ok((eth2_config, genesis_time, genesis_validators_root)) + Ok(( + yaml_config, + genesis.genesis_time, + genesis.genesis_validators_root, + )) } /// Request the version from the node, looping back and trying again on failure. Exit once the node /// has been contacted. -async fn wait_for_node<E: EthSpec>( - beacon_node: &RemoteBeaconNode<E>, - log: &Logger, -) -> Result<(), String> { +async fn wait_for_node(beacon_node: &BeaconNodeHttpClient, log: &Logger) -> Result<(), String> { // Try to get the version string from the node, looping until success is returned. 
loop { let log = log.clone(); let result = beacon_node - .clone() - .http - .node() - .get_version() + .get_node_version() .await - .map_err(|e| format!("{:?}", e)); + .map_err(|e| format!("{:?}", e)) + .map(|body| body.data.version); match result { Ok(version) => { diff --git a/validator_client/src/validator_duty.rs b/validator_client/src/validator_duty.rs new file mode 100644 index 00000000000..e5f56c38555 --- /dev/null +++ b/validator_client/src/validator_duty.rs @@ -0,0 +1,131 @@ +use eth2::{ + types::{BeaconCommitteeSubscription, StateId, ValidatorId}, + BeaconNodeHttpClient, +}; +use serde::{Deserialize, Serialize}; +use types::{CommitteeIndex, Epoch, PublicKey, PublicKeyBytes, Slot}; + +/// This struct is being used as a shim since we deprecated the `rest_api` in favour of `http_api`. +/// +/// Tracking issue: https://github.com/sigp/lighthouse/issues/1643 +// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct ValidatorDuty { + /// The validator's BLS public key, uniquely identifying them. + pub validator_pubkey: PublicKey, + /// The validator's index in `state.validators` + pub validator_index: Option<u64>, + /// The slot at which the validator must attest. + pub attestation_slot: Option<Slot>, + /// The index of the committee within `slot` of which the validator is a member. + pub attestation_committee_index: Option<CommitteeIndex>, + /// The position of the validator in the committee. + pub attestation_committee_position: Option<usize>, + /// The committee count at `attestation_slot`. + pub committee_count_at_slot: Option<u64>, + /// The number of validators in the committee. + pub committee_length: Option<u64>, + /// The slots in which a validator must propose a block (can be empty). + /// + /// Should be set to `None` when duties are not yet known (before the current epoch). 
+ pub block_proposal_slots: Option<Vec<Slot>>, +} + +impl ValidatorDuty { + /// Instantiate `Self` as if there are no known dutes for `validator_pubkey`. + fn no_duties(validator_pubkey: PublicKey) -> Self { + ValidatorDuty { + validator_pubkey, + validator_index: None, + attestation_slot: None, + attestation_committee_index: None, + attestation_committee_position: None, + committee_count_at_slot: None, + committee_length: None, + block_proposal_slots: None, + } + } + + /// Instantiate `Self` by performing requests on the `beacon_node`. + /// + /// Will only request proposer duties if `current_epoch == request_epoch`. + pub async fn download( + beacon_node: &BeaconNodeHttpClient, + current_epoch: Epoch, + request_epoch: Epoch, + pubkey: PublicKey, + ) -> Result<ValidatorDuty, String> { + let pubkey_bytes = PublicKeyBytes::from(&pubkey); + + let validator_index = if let Some(index) = beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey_bytes.clone()), + ) + .await + .map_err(|e| format!("Failed to get validator index: {}", e))? + .map(|body| body.data.index) + { + index + } else { + return Ok(Self::no_duties(pubkey)); + }; + + if let Some(attester) = beacon_node + .get_validator_duties_attester(request_epoch, Some(&[validator_index])) + .await + .map_err(|e| format!("Failed to get attester duties: {}", e))? + .data + .first() + { + let block_proposal_slots = if current_epoch == request_epoch { + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + .map_err(|e| format!("Failed to get proposer indices: {}", e))? 
+ .data + .into_iter() + .filter(|data| data.pubkey == pubkey_bytes) + .map(|data| data.slot) + .collect() + } else { + vec![] + }; + + Ok(ValidatorDuty { + validator_pubkey: pubkey, + validator_index: Some(attester.validator_index), + attestation_slot: Some(attester.slot), + attestation_committee_index: Some(attester.committee_index), + attestation_committee_position: Some(attester.validator_committee_index as usize), + committee_count_at_slot: Some(attester.committees_at_slot), + committee_length: Some(attester.committee_length), + block_proposal_slots: Some(block_proposal_slots), + }) + } else { + Ok(Self::no_duties(pubkey)) + } + } + + /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`. + pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool { + self.validator_pubkey == other.validator_pubkey + && self.validator_index == other.validator_index + && self.attestation_slot == other.attestation_slot + && self.attestation_committee_index == other.attestation_committee_index + && self.attestation_committee_position == other.attestation_committee_position + && self.committee_count_at_slot == other.committee_count_at_slot + && self.committee_length == other.committee_length + } + + /// Generate a subscription for `self`, if `self` has appropriate attestation duties. 
+ pub fn subscription(&self, is_aggregator: bool) -> Option<BeaconCommitteeSubscription> { + Some(BeaconCommitteeSubscription { + validator_index: self.validator_index?, + committee_index: self.attestation_committee_index?, + committees_at_slot: self.committee_count_at_slot?, + slot: self.attestation_slot?, + is_aggregator, + }) + } +} From 1ad28c0d2e5b4099a01a5b421d89dc33201e4220 Mon Sep 17 00:00:00 2001 From: Michael Sproul <michael@sigmaprime.io> Date: Wed, 30 Sep 2020 02:36:07 +0000 Subject: [PATCH 05/33] Add database schema versioning (#1688) ## Issue Addressed Closes #673 ## Proposed Changes Store a schema version in the database so that future releases can check they're running against a compatible database version. This would also enable automatic migration on breaking database changes, but that's left as future work. The database config is also stored in the database so that the `slots_per_restore_point` value can be checked for consistency, which closes #673 --- beacon_node/beacon_chain/src/beacon_chain.rs | 22 ++++--- beacon_node/beacon_chain/src/builder.rs | 10 ++-- beacon_node/beacon_chain/tests/tests.rs | 3 +- beacon_node/network/src/persisted_dht.rs | 18 +++--- beacon_node/src/cli.rs | 3 +- beacon_node/store/src/config.rs | 36 ++++++++++- beacon_node/store/src/errors.rs | 8 +++ beacon_node/store/src/hot_cold_store.rs | 63 +++++++++++++++++--- beacon_node/store/src/lib.rs | 3 +- beacon_node/store/src/metadata.rs | 29 +++++++++ 10 files changed, 153 insertions(+), 42 deletions(-) create mode 100644 beacon_node/store/src/metadata.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3bf5ae282d4..d189b01e2de 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -66,10 +66,11 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// validator pubkey cache. 
pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); -pub const BEACON_CHAIN_DB_KEY: [u8; 32] = [0; 32]; -pub const OP_POOL_DB_KEY: [u8; 32] = [0; 32]; -pub const ETH1_CACHE_DB_KEY: [u8; 32] = [0; 32]; -pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32]; +// These keys are all zero because they get stored in different columns, see `DBColumn` type. +pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); +pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); +pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero(); +pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); /// The result of a chain segment processing. pub enum ChainSegmentResult<T: EthSpec> { @@ -260,7 +261,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let fork_choice = self.fork_choice.read(); self.store.put_item( - &Hash256::from_slice(&FORK_CHOICE_DB_KEY), + &FORK_CHOICE_DB_KEY, &PersistedForkChoice { fork_choice: fork_choice.to_persisted(), fork_choice_store: fork_choice.fc_store().to_persisted(), @@ -272,8 +273,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { metrics::stop_timer(fork_choice_timer); let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); - self.store - .put_item(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?; + self.store.put_item(&BEACON_CHAIN_DB_KEY, &persisted_head)?; metrics::stop_timer(head_timer); @@ -290,7 +290,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); self.store.put_item( - &Hash256::from_slice(&OP_POOL_DB_KEY), + &OP_POOL_DB_KEY, &PersistedOperationPool::from_operation_pool(&self.op_pool), )?; @@ -302,10 +302,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); if let Some(eth1_chain) = self.eth1_chain.as_ref() { - self.store.put_item( - &Hash256::from_slice(Ð1_CACHE_DB_KEY), - ð1_chain.as_ssz_container(), - )?; + self.store + .put_item(Ð1_CACHE_DB_KEY, ð1_chain.as_ssz_container())?; } Ok(()) diff 
--git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ff47c7a2b81..5dbabcdd862 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -229,7 +229,7 @@ where .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?; store - .get_item::<SszEth1>(&Hash256::from_slice(Ð1_CACHE_DB_KEY)) + .get_item::<SszEth1>(Ð1_CACHE_DB_KEY) .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e)) } @@ -241,7 +241,7 @@ where .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?; Ok(store - .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) + .get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))? .is_some()) } @@ -272,7 +272,7 @@ where .ok_or_else(|| "resume_from_db requires a store.".to_string())?; let chain = store - .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) + .get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))? .ok_or_else(|| { "No persisted beacon chain found in store. Try purging the beacon chain database." @@ -280,7 +280,7 @@ where })?; let persisted_fork_choice = store - .get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY)) + .get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY) .map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))? .ok_or_else(|| "No persisted fork choice present in database.".to_string())?; @@ -307,7 +307,7 @@ where self.op_pool = Some( store - .get_item::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY)) + .get_item::<PersistedOperationPool<TEthSpec>>(&OP_POOL_DB_KEY) .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))? 
.map(PersistedOperationPool::into_operation_pool) .unwrap_or_else(OperationPool::new), diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 721eb409167..cd8b564787c 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -357,11 +357,10 @@ fn roundtrip_operation_pool() { .persist_op_pool() .expect("should persist op pool"); - let key = Hash256::from_slice(&OP_POOL_DB_KEY); let restored_op_pool = harness .chain .store - .get_item::<PersistedOperationPool<MinimalEthSpec>>(&key) + .get_item::<PersistedOperationPool<MinimalEthSpec>>(&OP_POOL_DB_KEY) .expect("should read db") .expect("should find op pool") .into_operation_pool(); diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 2149324422b..c11fcd44852 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -3,15 +3,14 @@ use std::sync::Arc; use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem}; use types::{EthSpec, Hash256}; -/// 32-byte key for accessing the `DhtEnrs`. -pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE"; +/// 32-byte key for accessing the `DhtEnrs`. All zero because `DhtEnrs` has its own column. 
+pub const DHT_DB_KEY: Hash256 = Hash256::zero(); pub fn load_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( store: Arc<HotColdDB<E, Hot, Cold>>, ) -> Vec<Enr> { // Load DHT from store - let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); - match store.get_item(&key) { + match store.get_item(&DHT_DB_KEY) { Ok(Some(p)) => { let p: PersistedDht = p; p.enrs @@ -25,9 +24,7 @@ pub fn persist_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( store: Arc<HotColdDB<E, Hot, Cold>>, enrs: Vec<Enr>, ) -> Result<(), store::Error> { - let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); - store.put_item(&key, &PersistedDht { enrs })?; - Ok(()) + store.put_item(&DHT_DB_KEY, &PersistedDht { enrs }) } /// Wrapper around DHT for persistence to disk. @@ -61,7 +58,7 @@ mod tests { use std::str::FromStr; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; - use types::{ChainSpec, Hash256, MinimalEthSpec}; + use types::{ChainSpec, MinimalEthSpec}; #[test] fn test_persisted_dht() { let log = NullLoggerBuilder.build().unwrap(); @@ -71,11 +68,10 @@ mod tests { MemoryStore<MinimalEthSpec>, > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(); let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; - let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); store - .put_item(&key, &PersistedDht { enrs: enrs.clone() }) + .put_item(&DHT_DB_KEY, &PersistedDht { enrs: enrs.clone() }) .unwrap(); - let dht: PersistedDht = store.get_item(&key).unwrap().unwrap(); + let dht: PersistedDht = store.get_item(&DHT_DB_KEY).unwrap().unwrap(); assert_eq!(dht.enrs, enrs); } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index fd838e03384..2ee3fa41784 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -267,7 +267,8 @@ pub fn cli_app<'a, 
'b>() -> App<'a, 'b> { .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help("Specifies how often a freezer DB restore point should be stored. \ - DO NOT DECREASE AFTER INITIALIZATION. [default: 2048 (mainnet) or 64 (minimal)]") + Cannot be changed after initialization. \ + [default: 2048 (mainnet) or 64 (minimal)]") .takes_value(true) ) .arg( diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index bebddf8fac5..91cf5ec1cb0 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,11 +1,14 @@ +use crate::{DBColumn, Error, StoreItem}; use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use types::{EthSpec, MinimalEthSpec}; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; /// Database configuration parameters. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)] pub struct StoreConfig { /// Number of slots to wait between storing restore points in the freezer database. 
pub slots_per_restore_point: u64, @@ -13,6 +16,11 @@ pub struct StoreConfig { pub block_cache_size: usize, } +#[derive(Debug, Clone)] +pub enum StoreConfigError { + MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 }, +} + impl Default for StoreConfig { fn default() -> Self { Self { @@ -22,3 +30,29 @@ impl Default for StoreConfig { } } } + +impl StoreConfig { + pub fn check_compatibility(&self, on_disk_config: &Self) -> Result<(), StoreConfigError> { + if self.slots_per_restore_point != on_disk_config.slots_per_restore_point { + return Err(StoreConfigError::MismatchedSlotsPerRestorePoint { + config: self.slots_per_restore_point, + on_disk: on_disk_config.slots_per_restore_point, + }); + } + Ok(()) + } +} + +impl StoreItem for StoreConfig { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { + Ok(Self::from_ssz_bytes(bytes)?) + } +} diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 8e9237361c4..622cd2ac73f 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,4 +1,5 @@ use crate::chunked_vector::ChunkError; +use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use types::{BeaconStateError, Hash256, Slot}; @@ -17,6 +18,7 @@ pub enum Error { BlockNotFound(Hash256), NoContinuationData, SplitPointModified(Slot, Slot), + ConfigError(StoreConfigError), } impl From<DecodeError> for Error { @@ -49,6 +51,12 @@ impl From<DBError> for Error { } } +impl From<StoreConfigError> for Error { + fn from(e: StoreConfigError) -> Error { + Error::ConfigError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 08e810866f9..55c403aa8a2 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ 
b/beacon_node/store/src/hot_cold_store.rs @@ -7,6 +7,9 @@ use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{ParentRootBlockIterator, StateRootsIterator}; use crate::leveldb_store::LevelDB; use crate::memory_store::MemoryStore; +use crate::metadata::{ + SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION, SCHEMA_VERSION_KEY, SPLIT_KEY, +}; use crate::metrics; use crate::{ get_key_for_col, DBColumn, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, @@ -27,9 +30,6 @@ use std::path::Path; use std::sync::Arc; use types::*; -/// 32-byte key for accessing the `split` of the freezer DB. -pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE"; - /// Defines how blocks should be replayed on states. #[derive(PartialEq)] pub enum BlockReplay { @@ -46,6 +46,8 @@ pub enum BlockReplay { /// intermittent "restore point" states pre-finalization. #[derive(Debug)] pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { + /// The schema version. Loaded from disk on initialization. + schema_version: SchemaVersion, /// The slot and state root at the point where the database is split between hot and cold. /// /// States with slots less than `split.slot` are in the cold DB, while states with slots @@ -70,6 +72,10 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { #[derive(Debug, PartialEq)] pub enum HotColdDBError { + UnsupportedSchemaVersion { + software_version: SchemaVersion, + disk_version: SchemaVersion, + }, /// Recoverable error indicating that the database freeze point couldn't be updated /// due to the finalized block not lying on an epoch boundary (should be infrequent). 
FreezeSlotUnaligned(Slot), @@ -106,6 +112,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; let db = HotColdDB { + schema_version: CURRENT_SCHEMA_VERSION, split: RwLock::new(Split::default()), cold_db: MemoryStore::open(), hot_db: MemoryStore::open(), @@ -134,6 +141,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; let db = HotColdDB { + schema_version: CURRENT_SCHEMA_VERSION, split: RwLock::new(Split::default()), cold_db: LevelDB::open(cold_path)?, hot_db: LevelDB::open(hot_path)?, @@ -144,12 +152,33 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> { _phantom: PhantomData, }; + // Ensure that the schema version of the on-disk database matches the software. + // In the future, this would be the spot to hook in auto-migration, etc. + if let Some(schema_version) = db.load_schema_version()? { + if schema_version != CURRENT_SCHEMA_VERSION { + return Err(HotColdDBError::UnsupportedSchemaVersion { + software_version: CURRENT_SCHEMA_VERSION, + disk_version: schema_version, + } + .into()); + } + } else { + db.store_schema_version(CURRENT_SCHEMA_VERSION)?; + } + + // Ensure that any on-disk config is compatible with the supplied config. + if let Some(disk_config) = db.load_config()? { + db.config.check_compatibility(&disk_config)?; + } + db.store_config()?; + // Load the previous split slot from the database (if any). This ensures we can // stop and restart correctly. if let Some(split) = db.load_split()? { info!( db.log, "Hot-Cold DB initialized"; + "version" => db.schema_version.0, "split_slot" => split.slot, "split_state" => format!("{:?}", split.state_root) ); @@ -744,11 +773,29 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> * self.config.slots_per_restore_point } + /// Load the database schema version from disk. 
+ fn load_schema_version(&self) -> Result<Option<SchemaVersion>, Error> { + self.hot_db.get(&SCHEMA_VERSION_KEY) + } + + /// Store the database schema version. + fn store_schema_version(&self, schema_version: SchemaVersion) -> Result<(), Error> { + self.hot_db.put(&SCHEMA_VERSION_KEY, &schema_version) + } + + /// Load previously-stored config from disk. + fn load_config(&self) -> Result<Option<StoreConfig>, Error> { + self.hot_db.get(&CONFIG_KEY) + } + + /// Write the config to disk. + fn store_config(&self) -> Result<(), Error> { + self.hot_db.put(&CONFIG_KEY, &self.config) + } + /// Load the split point from disk. fn load_split(&self) -> Result<Option<Split>, Error> { - let key = Hash256::from_slice(SPLIT_DB_KEY.as_bytes()); - let split: Option<Split> = self.hot_db.get(&key)?; - Ok(split) + self.hot_db.get(&SPLIT_KEY) } /// Load the state root of a restore point. @@ -927,9 +974,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( slot: frozen_head.slot, state_root: frozen_head_root, }; - store - .hot_db - .put_sync(&Hash256::from_slice(SPLIT_DB_KEY.as_bytes()), &split)?; + store.hot_db.put_sync(&SPLIT_KEY, &split)?; // Split point is now persisted in the hot database on disk. The in-memory split point // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 27187022686..f249be1f897 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -19,6 +19,7 @@ pub mod hot_cold_store; mod impls; mod leveldb_store; mod memory_store; +mod metadata; mod metrics; mod partial_beacon_state; @@ -153,7 +154,7 @@ pub enum DBColumn { } impl Into<&'static str> for DBColumn { - /// Returns a `&str` that can be used for keying a key-value data base. + /// Returns a `&str` prefix to be added to keys before they hit the key-value database. 
fn into(self) -> &'static str { match self { DBColumn::BeaconMeta => "bma", diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs new file mode 100644 index 00000000000..2d4733d6362 --- /dev/null +++ b/beacon_node/store/src/metadata.rs @@ -0,0 +1,29 @@ +use crate::{DBColumn, Error, StoreItem}; +use ssz::{Decode, Encode}; +use types::Hash256; + +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(1); + +// All the keys that get stored under the `BeaconMeta` column. +// +// We use `repeat_byte` because it's a const fn. +pub const SCHEMA_VERSION_KEY: Hash256 = Hash256::repeat_byte(0); +pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1); +pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct SchemaVersion(pub u64); + +impl StoreItem for SchemaVersion { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.0.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { + Ok(SchemaVersion(u64::from_ssz_bytes(bytes)?)) + } +} From a8efefd4bbc77b188ebd05be23c5816054334092 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Wed, 30 Sep 2020 14:40:31 -0400 Subject: [PATCH 06/33] Add macOS compatibility to health endpoint --- Cargo.lock | 64 +++++++++++++++++++++++------ beacon_node/http_api/tests/tests.rs | 4 +- common/eth2/Cargo.toml | 4 ++ common/eth2/src/lighthouse.rs | 45 +++++++++++++++++++- 4 files changed, 101 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a94d97af3a1..551ab81e855 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -154,7 +154,7 @@ version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" dependencies = [ - "memchr", + "memchr 2.3.3", ] [[package]] @@ -563,7 +563,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "lazy_static", - "memchr", + "memchr 2.3.3", "regex-automata", "serde", ] @@ -574,7 +574,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" dependencies = [ - "memchr", + "memchr 2.3.3", "safemem", ] @@ -631,6 +631,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytesize" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a18687293a1546b67c246452202bbbf143d239cb43494cc163da14979082da" + [[package]] name = "bzip2" version = "0.3.3" @@ -1078,7 +1084,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" dependencies = [ - "memchr", + "memchr 2.3.3", ] [[package]] @@ -1134,7 +1140,7 @@ checksum = "9fb90051930c9a0f09e585762152048e23ac74d20c10590ef7cf01c0343c3046" dependencies = [ "darwin-libproc-sys", "libc", - "memchr", + "memchr 2.3.3", ] [[package]] @@ -1477,6 +1483,7 @@ dependencies = [ "serde", "serde_json", "serde_utils", + "systemstat", "types", ] @@ -1956,7 +1963,7 @@ dependencies = [ "futures-macro", "futures-sink", "futures-task", - "memchr", + "memchr 2.3.3", "pin-project", "pin-utils", "proc-macro-hack", @@ -1972,7 +1979,7 @@ checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ "bytes 0.5.6", "futures 0.3.5", - "memchr", + "memchr 2.3.3", "pin-project", ] @@ -3194,6 +3201,15 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = 
"memchr" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" +dependencies = [ + "libc", +] + [[package]] name = "memchr" version = "2.3.3" @@ -3514,6 +3530,15 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" +[[package]] +name = "nom" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b" +dependencies = [ + "memchr 1.0.2", +] + [[package]] name = "num-bigint" version = "0.3.0" @@ -3969,7 +3994,7 @@ checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" dependencies = [ "byteorder", "libc", - "nom", + "nom 2.2.1", "rustc_version", ] @@ -4391,7 +4416,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" dependencies = [ "aho-corasick", - "memchr", + "memchr 2.3.3", "regex-syntax", "thread_local", ] @@ -4507,7 +4532,7 @@ dependencies = [ "fallible-streaming-iterator", "libsqlite3-sys", "lru-cache", - "memchr", + "memchr 2.3.3", "smallvec 1.4.2", "time 0.1.44", ] @@ -5312,6 +5337,21 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "systemstat" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2078da8d09c6202bffd5e075946e65bfad5ce2cfa161edb15c5f014a8440adee" +dependencies = [ + "bytesize", + "chrono", + "lazy_static", + "libc", + "nom 3.2.1", + "time 0.1.44", + "winapi 0.3.9", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -5571,7 +5611,7 @@ dependencies = [ "iovec", "lazy_static", "libc", - "memchr", + "memchr 2.3.3", "mio", "mio-named-pipes", "mio-uds", @@ -5992,7 +6032,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" dependencies = [ - "memchr", + "memchr 2.3.3", ] [[package]] diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2a7e8f6d40a..c7e7c05bdcd 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1445,14 +1445,14 @@ impl ApiTester { self } - #[cfg(target_os = "linux")] + #[cfg(any(target_os = "linux", target_os = "macos"))] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); self } - #[cfg(not(target_os = "linux"))] + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap_err(); diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f7ccfcf34eb..f786ecd6354 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -20,6 +20,10 @@ serde_utils = { path = "../../consensus/serde_utils" } psutil = { version = "3.1.0", optional = true } procinfo = { version = "0.4.2", optional = true } +[target.'cfg(target_os = "macos")'.dependencies] +systemstat = "0.1.5" +psutil = "3.1.0" + [features] default = ["lighthouse"] lighthouse = ["proto_array", "psutil", "procinfo"] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 8bfbad84ecf..abaaeaa2250 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -73,6 +73,11 @@ pub struct ValidatorInclusionData { #[cfg(target_os = "linux")] use {procinfo::pid, psutil::process::Process}; +#[cfg(target_os = "macos")] +use { + psutil::process::Process, + systemstat::{Platform, System}, +}; /// Reports on the health of the Lighthouse instance. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -104,9 +109,10 @@ pub struct Health { } impl Health { - #[cfg(not(target_os = "linux"))] + + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub fn observe() -> Result<Self, String> { - Err("Health is only available on Linux".into()) + Err("Health is only available on Linux and MacOS".into()) } #[cfg(target_os = "linux")] @@ -140,6 +146,41 @@ impl Health { sys_loadavg_15: loadavg.fifteen, }) } + + #[cfg(target_os = "macos")] + pub fn observe() -> Result<Self, String> { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + + let sys = System::new(); + + let loadavg = sys + .load_average() + .map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + Ok(Self { + pid: process.pid() as u32, + //TODO: figure out how to get threads for a PID on mac + pid_num_threads: 0, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one as f64, + sys_loadavg_5: loadavg.five as f64, + sys_loadavg_15: loadavg.fifteen as f64, + }) + } } impl BeaconNodeHttpClient { From df6a3f84d74c2daa5e9dc7b7c0d03c065314c829 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Thu, 1 Oct 2020 13:44:26 -0400 Subject: [PATCH 07/33] Add system endpoint, drives, network metrics --- Cargo.lock | 31 ++++++ beacon_node/http_api/src/lib.rs | 34 +++++- beacon_node/http_api/tests/tests.rs | 42 ++++++- beacon_node/http_metrics/src/metrics.rs | 5 - book/src/api-lighthouse.md | 1 - 
common/eth2/Cargo.toml | 1 + common/eth2/src/lighthouse.rs | 139 ++++++++++++++++++++++-- 7 files changed, 230 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 551ab81e855..c772a855c19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1297,6 +1297,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "dtoa" version = "0.4.6" @@ -1483,6 +1489,7 @@ dependencies = [ "serde", "serde_json", "serde_utils", + "sysinfo", "systemstat", "types", ] @@ -3539,6 +3546,15 @@ dependencies = [ "memchr 1.0.2", ] +[[package]] +name = "ntapi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a31937dea023539c72ddae0e3571deadc1414b300483fa7aaec176168cfa9d2" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.3.0" @@ -5337,6 +5353,21 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sysinfo" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35d086fd10743c3d963d6eec65f932b5a4afbe948931eaf7ae81f5d6cb555ae" +dependencies = [ + "cfg-if", + "doc-comment", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi 0.3.9", +] + [[package]] name = "systemstat" version = "0.1.5" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b23937b5df1..c82f380e2c4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1516,8 +1516,21 @@ pub fn serve<T: BeaconChainTypes>( }, ); - // GET lighthouse/health - let get_lighthouse_health = warp::path("lighthouse") + // GET lighthouse/system + let get_lighthouse_system = warp::path("lighthouse") + .and(warp::path("system")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + eth2::lighthouse::System::observe() + 
.map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }); + + // GET lighthouse/system/health + let get_lighthouse_system_health = warp::path("lighthouse") + .and(warp::path("system")) .and(warp::path("health")) .and(warp::path::end()) .and_then(|| { @@ -1528,6 +1541,19 @@ pub fn serve<T: BeaconChainTypes>( }) }); + // GET lighthouse/system/drives + let get_lighthouse_system_drives = warp::path("lighthouse") + .and(warp::path("system")) + .and(warp::path("drives")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + eth2::lighthouse::Drive::observe() + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -1655,7 +1681,9 @@ pub fn serve<T: BeaconChainTypes>( .or(get_validator_blocks.boxed()) .or(get_validator_attestation_data.boxed()) .or(get_validator_aggregate_attestation.boxed()) - .or(get_lighthouse_health.boxed()) + .or(get_lighthouse_system.boxed()) + .or(get_lighthouse_system_health.boxed()) + .or(get_lighthouse_system_drives.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_peers.boxed()) .or(get_lighthouse_peers_connected.boxed()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index c7e7c05bdcd..3f3402dcedd 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1446,15 +1446,43 @@ impl ApiTester { } #[cfg(any(target_os = "linux", target_os = "macos"))] - pub async fn test_get_lighthouse_health(self) -> Self { - self.client.get_lighthouse_health().await.unwrap(); + pub async fn test_get_lighthouse_system(self) -> Self { + self.client.get_lighthouse_system().await.unwrap(); + + self + } + + #[cfg(any(target_os = "linux", target_os = "macos"))] + pub async fn test_get_lighthouse_system_health(self) -> Self { + 
self.client.get_lighthouse_system_health().await.unwrap(); + + self + } + + #[cfg(any(target_os = "linux", target_os = "macos"))] + pub async fn test_get_lighthouse_system_drives(self) -> Self { + self.client.get_lighthouse_system_drives().await.unwrap(); + + self + } + + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub async fn test_get_lighthouse_system(self) -> Self { + self.client.get_lighthouse_system().await.unwrap_err(); self } #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] - pub async fn test_get_lighthouse_health(self) -> Self { - self.client.get_lighthouse_health().await.unwrap_err(); + pub async fn test_get_lighthouse_system_health(self) -> Self { + self.client.get_lighthouse_system_health().await.unwrap_err(); + + self + } + + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub async fn test_get_lighthouse_system_drives(self) -> Self { + self.client.get_lighthouse_system_drives().await.unwrap_err(); self } @@ -1773,7 +1801,11 @@ async fn get_validator_beacon_committee_subscriptions() { #[tokio::test(core_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() - .test_get_lighthouse_health() + .test_get_lighthouse_system() + .await + .test_get_lighthouse_system_health() + .await + .test_get_lighthouse_system_drives() .await .test_get_lighthouse_syncing() .await diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index bcd803c405e..87a8c297593 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -6,10 +6,6 @@ use lighthouse_metrics::{Encoder, TextEncoder}; pub use lighthouse_metrics::*; lazy_static! 
{ - pub static ref PROCESS_NUM_THREADS: Result<IntGauge> = try_create_int_gauge( - "process_num_threads", - "Number of threads used by the current process" - ); pub static ref PROCESS_RES_MEM: Result<IntGauge> = try_create_int_gauge( "process_resident_memory_bytes", "Resident memory used by the current process" @@ -78,7 +74,6 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( // This will silently fail if we are unable to observe the health. This is desired behaviour // since we don't support `Health` for all platforms. if let Ok(health) = Health::observe() { - set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads as i64); set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); set_gauge(&SYSTEM_VIRT_MEM_TOTAL, health.sys_virt_mem_total as i64); diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 3f37673fa9d..0e4a16c6101 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -28,7 +28,6 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j { "data": { "pid": 1728254, - "pid_num_threads": 47, "pid_mem_resident_set_size": 510054400, "pid_mem_virtual_memory_size": 3963158528, "sys_virt_mem_total": 16715530240, diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f786ecd6354..5e566f0d98c 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -15,6 +15,7 @@ reqwest = { version = "0.10.8", features = ["json"] } eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" } proto_array = { path = "../../consensus/proto_array", optional = true } serde_utils = { path = "../../consensus/serde_utils" } +sysinfo = "0.15.2" [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.1.0", optional = true } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index abaaeaa2250..24e237560bf 100644 --- a/common/eth2/src/lighthouse.rs +++ 
b/common/eth2/src/lighthouse.rs @@ -6,6 +6,7 @@ use crate::{ }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; +use sysinfo::{NetworksExt, System as SystemInfo, SystemExt, NetworkExt}; pub use eth2_libp2p::{types::SyncState, PeerInfo}; @@ -76,16 +77,75 @@ use {procinfo::pid, psutil::process::Process}; #[cfg(target_os = "macos")] use { psutil::process::Process, - systemstat::{Platform, System}, + systemstat::{Platform, System as SystemStat}, }; +/// Reports information about the system the Lighthouse instance is running on. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct System { + pub health : Health, + pub drives : Vec<Drive>, +} + +impl System { + pub fn observe() -> Result<Self, String> { + Ok(Self { + health: Health::observe()?, + drives: Drive::observe()?, + }) + } +} + +/// Reports information about a drive on the system the Lighthouse instance is running on. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Drive { + pub filesystem: String, + pub used: u64, + pub avail: u64, + pub used_pct: u64, + pub total: u64, + pub mounted_on: String, +} + +impl Drive { + pub fn observe() -> Result<Vec<Self>, String> { + let system = SystemStat::new(); + Ok(system.mounts().expect("Could not find mounts.").into_iter().map(|drive| { + Drive { + filesystem: drive.fs_mounted_from , + used: drive.total.as_u64() - drive.avail.as_u64() , + avail: drive.avail.as_u64() , + used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) * 100.0) as u64 , + total: drive.total.as_u64() , + mounted_on: drive.fs_mounted_on , + } + }).collect()) + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Network { + /// Network interface name + pub name: String, + /// Network metric for received bytes. + pub rx_bytes: u64, + /// Network metric for received errors. + pub rx_errors: u64, + /// Network metric for received packets. 
+ pub rx_packets: u64, + /// Network metric for transmitted bytes. + pub tx_bytes: u64, + /// Network metric for trasmitted errors. + pub tx_errors: u64, + /// Network metric for transmitted packets. + pub tx_packets: u64, +} + /// Reports on the health of the Lighthouse instance. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Health { /// The pid of this process. pub pid: u32, - /// The number of threads used by this pid. - pub pid_num_threads: i32, /// The total resident memory used by this pid. pub pid_mem_resident_set_size: u64, /// The total virtual memory used by this pid. @@ -106,6 +166,8 @@ pub struct Health { pub sys_loadavg_5: f64, /// System load average over 15 minutes. pub sys_loadavg_15: f64, + /// Network interfaces and related statistics. + pub networks: Vec<Network>, } impl Health { @@ -131,9 +193,29 @@ impl Health { let loadavg = psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + let s = SystemInfo::new_all(); + + let mut rx_bytes = 0; + let mut rx_errors = 0; + let mut rx_packets = 0; + let mut tx_bytes = 0; + let mut tx_errors = 0; + let mut tx_packets = 0; + + let networks = s.get_networks().iter().map(|(name, network)| { + Network { + name: name.to_string(), + rx_bytes: network.get_total_received(), + rx_errors: network.get_total_transmitted(), + rx_packets: network.get_total_packets_received(), + tx_bytes: network.get_total_packets_transmitted(), + tx_errors: network.get_total_errors_on_received(), + tx_packets: network.get_total_errors_on_transmitted(), + } + }).collect(); + Ok(Self { pid: process.pid(), - pid_num_threads: stat.num_threads, pid_mem_resident_set_size: process_mem.rss(), pid_mem_virtual_memory_size: process_mem.vms(), sys_virt_mem_total: vm.total(), @@ -144,6 +226,7 @@ impl Health { sys_loadavg_1: loadavg.one, sys_loadavg_5: loadavg.five, sys_loadavg_15: loadavg.fifteen, + networks, }) } @@ -159,16 +242,27 @@ impl Health { let vm = psutil::memory::virtual_memory() 
.map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - let sys = System::new(); + let sys = SystemStat::new(); let loadavg = sys .load_average() .map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + let s = SystemInfo::new_all(); + let networks = s.get_networks().iter().map(|(name, network)| { + Network { + name: name.to_string(), + rx_bytes: network.get_total_received(), + rx_errors: network.get_total_transmitted(), + rx_packets: network.get_total_packets_received(), + tx_bytes: network.get_total_packets_transmitted(), + tx_errors: network.get_total_errors_on_received(), + tx_packets: network.get_total_errors_on_transmitted(), + } + }).collect(); + Ok(Self { pid: process.pid() as u32, - //TODO: figure out how to get threads for a PID on mac - pid_num_threads: 0, pid_mem_resident_set_size: process_mem.rss(), pid_mem_virtual_memory_size: process_mem.vms(), sys_virt_mem_total: vm.total(), @@ -179,23 +273,50 @@ impl Health { sys_loadavg_1: loadavg.one as f64, sys_loadavg_5: loadavg.five as f64, sys_loadavg_15: loadavg.fifteen as f64, + networks, }) } } impl BeaconNodeHttpClient { - /// `GET lighthouse/health` - pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + /// `GET lighthouse/system` + pub async fn get_lighthouse_system(&self) -> Result<GenericResponse<System>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("system"); + + self.get(path).await + } + + /// `GET lighthouse/system/health` + pub async fn get_lighthouse_system_health(&self) -> Result<GenericResponse<Health>, Error> { let mut path = self.server.clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("lighthouse") + .push("system") .push("health"); self.get(path).await } + /// `GET lighthouse/system/drives` + pub async fn get_lighthouse_system_drives(&self) -> Result<GenericResponse<Vec<Drive>>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("system") + .push("drives"); + + self.get(path).await + } + /// `GET lighthouse/syncing` pub async fn get_lighthouse_syncing(&self) -> Result<GenericResponse<SyncState>, Error> { let mut path = self.server.clone(); From e511c09ff755a945b0e85a22ef974348808c20cb Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Thu, 1 Oct 2020 21:25:28 -0400 Subject: [PATCH 08/33] Sum network stats across interfaces --- common/eth2/src/lighthouse.rs | 85 ++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 36 deletions(-) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 24e237560bf..e2f42c1137a 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -125,19 +125,17 @@ impl Drive { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Network { - /// Network interface name - pub name: String, - /// Network metric for received bytes. + /// Network metric for received bytes across all network interfaces. pub rx_bytes: u64, - /// Network metric for received errors. + /// Network metric for received errors across all network interfaces. pub rx_errors: u64, - /// Network metric for received packets. + /// Network metric for received packets across all network interfaces. pub rx_packets: u64, - /// Network metric for transmitted bytes. + /// Network metric for transmitted bytes across all network interfaces. pub tx_bytes: u64, - /// Network metric for trasmitted errors. + /// Network metric for trasmitted errors across all network interfaces. pub tx_errors: u64, - /// Network metric for transmitted packets. 
+ /// Network metric for transmitted packets across all network interfaces. pub tx_packets: u64, } @@ -166,8 +164,8 @@ pub struct Health { pub sys_loadavg_5: f64, /// System load average over 15 minutes. pub sys_loadavg_15: f64, - /// Network interfaces and related statistics. - pub networks: Vec<Network>, + /// Network statistics. + pub network: Network, } impl Health { @@ -193,8 +191,6 @@ impl Health { let loadavg = psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - let s = SystemInfo::new_all(); - let mut rx_bytes = 0; let mut rx_errors = 0; let mut rx_packets = 0; @@ -202,17 +198,15 @@ impl Health { let mut tx_errors = 0; let mut tx_packets = 0; - let networks = s.get_networks().iter().map(|(name, network)| { - Network { - name: name.to_string(), - rx_bytes: network.get_total_received(), - rx_errors: network.get_total_transmitted(), - rx_packets: network.get_total_packets_received(), - tx_bytes: network.get_total_packets_transmitted(), - tx_errors: network.get_total_errors_on_received(), - tx_packets: network.get_total_errors_on_transmitted(), - } - }).collect(); + let s = SystemInfo::new_all(); + s.get_networks().iter().for_each(|(_, network)| { + rx_bytes = rx_bytes + network.get_total_received(); + rx_errors = rx_errors + network.get_total_transmitted(); + rx_packets = rx_packets + network.get_total_packets_received(); + tx_bytes = tx_bytes + network.get_total_packets_transmitted(); + tx_errors = tx_errors + network.get_total_errors_on_received(); + tx_packets = tx_packets + network.get_total_errors_on_transmitted(); + }); Ok(Self { pid: process.pid(), @@ -226,7 +220,14 @@ impl Health { sys_loadavg_1: loadavg.one, sys_loadavg_5: loadavg.five, sys_loadavg_15: loadavg.fifteen, - networks, + network: Network{ + rx_bytes, + rx_errors, + rx_packets, + tx_bytes, + tx_errors, + tx_packets, + }, }) } @@ -248,18 +249,22 @@ impl Health { .load_average() .map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + let mut rx_bytes = 0; + 
let mut rx_errors = 0; + let mut rx_packets = 0; + let mut tx_bytes = 0; + let mut tx_errors = 0; + let mut tx_packets = 0; + let s = SystemInfo::new_all(); - let networks = s.get_networks().iter().map(|(name, network)| { - Network { - name: name.to_string(), - rx_bytes: network.get_total_received(), - rx_errors: network.get_total_transmitted(), - rx_packets: network.get_total_packets_received(), - tx_bytes: network.get_total_packets_transmitted(), - tx_errors: network.get_total_errors_on_received(), - tx_packets: network.get_total_errors_on_transmitted(), - } - }).collect(); + s.get_networks().iter().for_each(|(_, network)| { + rx_bytes = rx_bytes + network.get_total_received(); + rx_errors = rx_errors + network.get_total_transmitted(); + rx_packets = rx_packets + network.get_total_packets_received(); + tx_bytes = tx_bytes + network.get_total_packets_transmitted(); + tx_errors = tx_errors + network.get_total_errors_on_received(); + tx_packets = tx_packets + network.get_total_errors_on_transmitted(); + }); Ok(Self { pid: process.pid() as u32, @@ -273,7 +278,15 @@ impl Health { sys_loadavg_1: loadavg.one as f64, sys_loadavg_5: loadavg.five as f64, sys_loadavg_15: loadavg.fifteen as f64, - networks, + network: Network{ + rx_bytes, + rx_errors, + rx_packets, + tx_bytes, + tx_errors, + tx_packets, + }, + }) } } From 911cc60b0fab234507b13e48d5e708bd2e086fde Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Thu, 1 Oct 2020 21:55:28 -0400 Subject: [PATCH 09/33] fix linux dependency --- beacon_node/http_api/tests/tests.rs | 10 ++++- common/eth2/Cargo.toml | 2 +- common/eth2/src/lighthouse.rs | 70 ++++++++++++++--------------- 3 files changed, 44 insertions(+), 38 deletions(-) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3f3402dcedd..7fa65d14496 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1475,14 +1475,20 @@ impl ApiTester { #[cfg(all(not(target_os = 
"linux"), not(target_os = "macos")))] pub async fn test_get_lighthouse_system_health(self) -> Self { - self.client.get_lighthouse_system_health().await.unwrap_err(); + self.client + .get_lighthouse_system_health() + .await + .unwrap_err(); self } #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub async fn test_get_lighthouse_system_drives(self) -> Self { - self.client.get_lighthouse_system_drives().await.unwrap_err(); + self.client + .get_lighthouse_system_drives() + .await + .unwrap_err(); self } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 5e566f0d98c..d73d5a2b6c9 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -16,13 +16,13 @@ eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" } proto_array = { path = "../../consensus/proto_array", optional = true } serde_utils = { path = "../../consensus/serde_utils" } sysinfo = "0.15.2" +systemstat = "0.1.5" [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.1.0", optional = true } procinfo = { version = "0.4.2", optional = true } [target.'cfg(target_os = "macos")'.dependencies] -systemstat = "0.1.5" psutil = "3.1.0" [features] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e2f42c1137a..46562961c33 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -6,7 +6,8 @@ use crate::{ }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; -use sysinfo::{NetworksExt, System as SystemInfo, SystemExt, NetworkExt}; +use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; +use systemstat::{Platform, System as SystemStat}; pub use eth2_libp2p::{types::SyncState, PeerInfo}; @@ -72,19 +73,16 @@ pub struct ValidatorInclusionData { pub is_previous_epoch_head_attester: bool, } +#[cfg(target_os = "macos")] +use psutil::process::Process; #[cfg(target_os = "linux")] use {procinfo::pid, psutil::process::Process}; -#[cfg(target_os = "macos")] -use { - 
psutil::process::Process, - systemstat::{Platform, System as SystemStat}, -}; /// Reports information about the system the Lighthouse instance is running on. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct System { - pub health : Health, - pub drives : Vec<Drive>, + pub health: Health, + pub drives: Vec<Drive>, } impl System { @@ -110,16 +108,20 @@ pub struct Drive { impl Drive { pub fn observe() -> Result<Vec<Self>, String> { let system = SystemStat::new(); - Ok(system.mounts().expect("Could not find mounts.").into_iter().map(|drive| { - Drive { - filesystem: drive.fs_mounted_from , - used: drive.total.as_u64() - drive.avail.as_u64() , - avail: drive.avail.as_u64() , - used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) * 100.0) as u64 , - total: drive.total.as_u64() , - mounted_on: drive.fs_mounted_on , - } - }).collect()) + Ok(system + .mounts() + .expect("Could not find mounts.") + .into_iter() + .map(|drive| Drive { + filesystem: drive.fs_mounted_from, + used: drive.total.as_u64() - drive.avail.as_u64(), + avail: drive.avail.as_u64(), + used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) + * 100.0) as u64, + total: drive.total.as_u64(), + mounted_on: drive.fs_mounted_on, + }) + .collect()) } } @@ -169,7 +171,6 @@ pub struct Health { } impl Health { - #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub fn observe() -> Result<Self, String> { Err("Health is only available on Linux and MacOS".into()) @@ -200,12 +201,12 @@ impl Health { let s = SystemInfo::new_all(); s.get_networks().iter().for_each(|(_, network)| { - rx_bytes = rx_bytes + network.get_total_received(); - rx_errors = rx_errors + network.get_total_transmitted(); - rx_packets = rx_packets + network.get_total_packets_received(); - tx_bytes = tx_bytes + network.get_total_packets_transmitted(); - tx_errors = tx_errors + network.get_total_errors_on_received(); - tx_packets = tx_packets + 
network.get_total_errors_on_transmitted(); + rx_bytes = rx_bytes + network.get_total_received(); + rx_errors = rx_errors + network.get_total_transmitted(); + rx_packets = rx_packets + network.get_total_packets_received(); + tx_bytes = tx_bytes + network.get_total_packets_transmitted(); + tx_errors = tx_errors + network.get_total_errors_on_received(); + tx_packets = tx_packets + network.get_total_errors_on_transmitted(); }); Ok(Self { @@ -220,7 +221,7 @@ impl Health { sys_loadavg_1: loadavg.one, sys_loadavg_5: loadavg.five, sys_loadavg_15: loadavg.fifteen, - network: Network{ + network: Network { rx_bytes, rx_errors, rx_packets, @@ -256,14 +257,14 @@ impl Health { let mut tx_errors = 0; let mut tx_packets = 0; - let s = SystemInfo::new_all(); + let s = SystemInfo::new_all(); s.get_networks().iter().for_each(|(_, network)| { - rx_bytes = rx_bytes + network.get_total_received(); - rx_errors = rx_errors + network.get_total_transmitted(); - rx_packets = rx_packets + network.get_total_packets_received(); - tx_bytes = tx_bytes + network.get_total_packets_transmitted(); - tx_errors = tx_errors + network.get_total_errors_on_received(); - tx_packets = tx_packets + network.get_total_errors_on_transmitted(); + rx_bytes = rx_bytes + network.get_total_received(); + rx_errors = rx_errors + network.get_total_transmitted(); + rx_packets = rx_packets + network.get_total_packets_received(); + tx_bytes = tx_bytes + network.get_total_packets_transmitted(); + tx_errors = tx_errors + network.get_total_errors_on_received(); + tx_packets = tx_packets + network.get_total_errors_on_transmitted(); }); Ok(Self { @@ -278,7 +279,7 @@ impl Health { sys_loadavg_1: loadavg.one as f64, sys_loadavg_5: loadavg.five as f64, sys_loadavg_15: loadavg.fifteen as f64, - network: Network{ + network: Network { rx_bytes, rx_errors, rx_packets, @@ -286,7 +287,6 @@ impl Health { tx_errors, tx_packets, }, - }) } } From 603941f3c2242b0ef143865ffa7f5760685bb0aa Mon Sep 17 00:00:00 2001 From: realbigsean 
<seananderson33@gmail.com> Date: Thu, 1 Oct 2020 21:58:09 -0400 Subject: [PATCH 10/33] cargo clippy --- common/eth2/src/lighthouse.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 46562961c33..ce4f0a0fef3 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -201,12 +201,12 @@ impl Health { let s = SystemInfo::new_all(); s.get_networks().iter().for_each(|(_, network)| { - rx_bytes = rx_bytes + network.get_total_received(); - rx_errors = rx_errors + network.get_total_transmitted(); - rx_packets = rx_packets + network.get_total_packets_received(); - tx_bytes = tx_bytes + network.get_total_packets_transmitted(); - tx_errors = tx_errors + network.get_total_errors_on_received(); - tx_packets = tx_packets + network.get_total_errors_on_transmitted(); + rx_bytes += network.get_total_received(); + rx_errors += network.get_total_transmitted(); + rx_packets += network.get_total_packets_received(); + tx_bytes += network.get_total_packets_transmitted(); + tx_errors += network.get_total_errors_on_received(); + tx_packets += network.get_total_errors_on_transmitted(); }); Ok(Self { @@ -259,12 +259,12 @@ impl Health { let s = SystemInfo::new_all(); s.get_networks().iter().for_each(|(_, network)| { - rx_bytes = rx_bytes + network.get_total_received(); - rx_errors = rx_errors + network.get_total_transmitted(); - rx_packets = rx_packets + network.get_total_packets_received(); - tx_bytes = tx_bytes + network.get_total_packets_transmitted(); - tx_errors = tx_errors + network.get_total_errors_on_received(); - tx_packets = tx_packets + network.get_total_errors_on_transmitted(); + rx_bytes += network.get_total_received(); + rx_errors += network.get_total_transmitted(); + rx_packets += network.get_total_packets_received(); + tx_bytes += network.get_total_packets_transmitted(); + tx_errors += network.get_total_errors_on_received(); + tx_packets += 
network.get_total_errors_on_transmitted(); }); Ok(Self { From ff90d23071161ea87c21b21a8298fd1b58556879 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Thu, 1 Oct 2020 22:02:52 -0400 Subject: [PATCH 11/33] remove procinfo --- Cargo.lock | 21 +-------------------- common/eth2/Cargo.toml | 3 +-- common/eth2/src/lighthouse.rs | 4 +--- 3 files changed, 3 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c772a855c19..c4d827be607 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1482,7 +1482,6 @@ version = "0.1.0" dependencies = [ "eth2_libp2p", "hex 0.4.2", - "procinfo", "proto_array", "psutil", "reqwest", @@ -3531,12 +3530,6 @@ dependencies = [ "validator_dir", ] -[[package]] -name = "nom" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" - [[package]] name = "nom" version = "3.2.1" @@ -4002,18 +3995,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "procinfo" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" -dependencies = [ - "byteorder", - "libc", - "nom 2.2.1", - "rustc_version", -] - [[package]] name = "prometheus" version = "0.9.0" @@ -5378,7 +5359,7 @@ dependencies = [ "chrono", "lazy_static", "libc", - "nom 3.2.1", + "nom", "time 0.1.44", "winapi 0.3.9", ] diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d73d5a2b6c9..391881e05bd 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -20,11 +20,10 @@ systemstat = "0.1.5" [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.1.0", optional = true } -procinfo = { version = "0.4.2", optional = true } [target.'cfg(target_os = "macos")'.dependencies] psutil = "3.1.0" [features] default = ["lighthouse"] -lighthouse = ["proto_array", "psutil", "procinfo"] +lighthouse = ["proto_array", 
"psutil"] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index ce4f0a0fef3..495cca1730f 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -76,7 +76,7 @@ pub struct ValidatorInclusionData { #[cfg(target_os = "macos")] use psutil::process::Process; #[cfg(target_os = "linux")] -use {procinfo::pid, psutil::process::Process}; +use psutil::process::Process; /// Reports information about the system the Lighthouse instance is running on. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -185,8 +185,6 @@ impl Health { .memory_info() .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; - let vm = psutil::memory::virtual_memory() .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; let loadavg = From b9a357e5e26430716422bdf63f4b9ca6408c838f Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 08:44:45 -0400 Subject: [PATCH 12/33] update drive metrics --- common/eth2/src/lighthouse.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 495cca1730f..53cef48d345 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -6,7 +6,7 @@ use crate::{ }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; -use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; +use sysinfo::{DiskExt, NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; use systemstat::{Platform, System as SystemStat}; pub use eth2_libp2p::{types::SyncState, PeerInfo}; @@ -98,9 +98,8 @@ impl System { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Drive { pub filesystem: String, - pub used: u64, pub avail: u64, - pub used_pct: u64, + pub avail_pct: u64, pub total: u64, pub mounted_on: String, } @@ -114,9 +113,8 @@ impl 
Drive { .into_iter() .map(|drive| Drive { filesystem: drive.fs_mounted_from, - used: drive.total.as_u64() - drive.avail.as_u64(), avail: drive.avail.as_u64(), - used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) + avail_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) * 100.0) as u64, total: drive.total.as_u64(), mounted_on: drive.fs_mounted_on, From b193ae287f91cefde24152ddbc4d0e989197b38b Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 09:55:53 -0400 Subject: [PATCH 13/33] update drive metrics --- common/eth2/src/lighthouse.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 53cef48d345..8cf404dc6aa 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -99,7 +99,8 @@ impl System { pub struct Drive { pub filesystem: String, pub avail: u64, - pub avail_pct: u64, + pub used: u64, + pub used_pct: u64, pub total: u64, pub mounted_on: String, } @@ -114,7 +115,8 @@ impl Drive { .map(|drive| Drive { filesystem: drive.fs_mounted_from, avail: drive.avail.as_u64(), - avail_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) + used: (drive.total - drive.avail) as u64, + used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) * 100.0) as u64, total: drive.total.as_u64(), mounted_on: drive.fs_mounted_on, @@ -185,6 +187,7 @@ impl Health { let vm = psutil::memory::virtual_memory() .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + let loadavg = psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; From f94ce56d27a0794ef96a8c7a00ec5b7828d85bc2 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 10:15:57 -0400 Subject: [PATCH 14/33] VC updates --- Cargo.lock | 98 ++++++++++++++------ common/eth2/src/lighthouse.rs | 4 +- 
common/eth2/src/lighthouse_vc/http_client.rs | 5 +- validator_client/src/http_api/mod.rs | 7 +- validator_client/src/http_api/tests.rs | 6 +- 5 files changed, 84 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 256d91740a1..cc3b555184d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,7 +155,7 @@ version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" dependencies = [ - "memchr", + "memchr 2.3.3", ] [[package]] @@ -564,7 +564,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "lazy_static", - "memchr", + "memchr 2.3.3", "regex-automata", "serde", ] @@ -575,7 +575,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" dependencies = [ - "memchr", + "memchr 2.3.3", "safemem", ] @@ -632,6 +632,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytesize" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a18687293a1546b67c246452202bbbf143d239cb43494cc163da14979082da" + [[package]] name = "bzip2" version = "0.3.3" @@ -1077,7 +1083,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" dependencies = [ - "memchr", + "memchr 2.3.3", ] [[package]] @@ -1133,7 +1139,7 @@ checksum = "9fb90051930c9a0f09e585762152048e23ac74d20c10590ef7cf01c0343c3046" dependencies = [ "darwin-libproc-sys", "libc", - "memchr", + "memchr 2.3.3", ] [[package]] @@ -1479,7 +1485,6 @@ dependencies = [ "eth2_libp2p", "hex 0.4.2", "libsecp256k1", - "procinfo", 
"proto_array", "psutil", "reqwest", @@ -1487,6 +1492,8 @@ dependencies = [ "serde", "serde_json", "serde_utils", + "sysinfo", + "systemstat", "types", "zeroize", ] @@ -1967,7 +1974,7 @@ dependencies = [ "futures-macro", "futures-sink", "futures-task", - "memchr", + "memchr 2.3.3", "pin-project", "pin-utils", "proc-macro-hack", @@ -1983,7 +1990,7 @@ checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ "bytes 0.5.6", "futures 0.3.5", - "memchr", + "memchr 2.3.3", "pin-project", ] @@ -3206,6 +3213,15 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +[[package]] +name = "memchr" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" +dependencies = [ + "libc", +] + [[package]] name = "memchr" version = "2.3.3" @@ -3522,9 +3538,21 @@ dependencies = [ [[package]] name = "nom" -version = "2.2.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" +checksum = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b" +dependencies = [ + "memchr 1.0.2", +] + +[[package]] +name = "ntapi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a31937dea023539c72ddae0e3571deadc1414b300483fa7aaec176168cfa9d2" +dependencies = [ + "winapi 0.3.9", +] [[package]] name = "num-bigint" @@ -3973,18 +4001,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "procinfo" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" -dependencies = [ - "byteorder", - "libc", - "nom", - "rustc_version", -] - [[package]] name = "prometheus" version = "0.9.0" @@ 
-4403,7 +4419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" dependencies = [ "aho-corasick", - "memchr", + "memchr 2.3.3", "regex-syntax", "thread_local", ] @@ -4519,7 +4535,7 @@ dependencies = [ "fallible-streaming-iterator", "libsqlite3-sys", "lru-cache", - "memchr", + "memchr 2.3.3", "smallvec 1.4.2", "time 0.1.44", ] @@ -5349,6 +5365,36 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sysinfo" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35d086fd10743c3d963d6eec65f932b5a4afbe948931eaf7ae81f5d6cb555ae" +dependencies = [ + "cfg-if", + "doc-comment", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi 0.3.9", +] + +[[package]] +name = "systemstat" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2078da8d09c6202bffd5e075946e65bfad5ce2cfa161edb15c5f014a8440adee" +dependencies = [ + "bytesize", + "chrono", + "lazy_static", + "libc", + "nom", + "time 0.1.44", + "winapi 0.3.9", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -5608,7 +5654,7 @@ dependencies = [ "iovec", "lazy_static", "libc", - "memchr", + "memchr 2.3.3", "mio", "mio-named-pipes", "mio-uds", @@ -6028,7 +6074,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" dependencies = [ - "memchr", + "memchr 2.3.3", ] [[package]] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 8cf404dc6aa..e5322e8149f 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -6,7 +6,7 @@ use crate::{ }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; -use sysinfo::{DiskExt, NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; +use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; use 
systemstat::{Platform, System as SystemStat}; pub use eth2_libp2p::{types::SyncState, PeerInfo}; @@ -115,7 +115,7 @@ impl Drive { .map(|drive| Drive { filesystem: drive.fs_mounted_from, avail: drive.avail.as_u64(), - used: (drive.total - drive.avail) as u64, + used: drive.total.as_u64() - drive.avail.as_u64(), used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) * 100.0) as u64, total: drive.total.as_u64(), diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index b08ceabb2a2..b4ffce82913 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -197,13 +197,14 @@ impl ValidatorClientHttpClient { self.get(path).await } - /// `GET lighthouse/health` - pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + /// `GET lighthouse/system/health` + pub async fn get_lighthouse_system_health(&self) -> Result<GenericResponse<Health>, Error> { let mut path = self.server.clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("lighthouse") + .push("system") .push("health"); self.get(path).await diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 7e0d387d26a..6961cfc745b 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -153,8 +153,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( }) }); - // GET lighthouse/health - let get_lighthouse_health = warp::path("lighthouse") + // GET lighthouse/system/health + let get_lighthouse_system_health = warp::path("lighthouse") + .and(warp::path("system")) .and(warp::path("health")) .and(warp::path::end()) .and(signer.clone()) @@ -412,7 +413,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .and( warp::get().and( get_node_version - .or(get_lighthouse_health) + .or(get_lighthouse_system_health) .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey), diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index e9344b5f4b8..17e6c575c66 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -156,8 +156,8 @@ impl ApiTester { } #[cfg(target_os = "linux")] - pub async fn test_get_lighthouse_health(self) -> Self { - self.client.get_lighthouse_health().await.unwrap(); + pub async fn test_get_lighthouse_system_health(self) -> Self { + self.client.get_lighthouse_system_health().await.unwrap(); self } @@ -433,7 +433,7 @@ async fn simple_getters() { .await .test_get_lighthouse_version() .await - .test_get_lighthouse_health() + .test_get_lighthouse_system_health() .await .test_get_lighthouse_spec() .await; From 146d67d73fef1057a8d17a2f286e25fc9c40bc40 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 11:03:28 -0400 Subject: [PATCH 15/33] add log info --- beacon_node/http_api/src/lib.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/beacon_node/http_api/src/lib.rs 
b/beacon_node/http_api/src/lib.rs index badfcdfd348..629a2de1f90 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -108,6 +108,18 @@ pub fn slog_logging( log: Logger, ) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> { warp::log::custom(move |info| { + let remote_port = info.remote_addr().unwrap().port(); + let remote_ip = info.remote_addr().unwrap().ip().to_string(); + let remote_addr = info.remote_addr().unwrap().to_string(); + let mut header = "".to_string(); + info.request_headers().into_iter().for_each(| h| { + header.push_str(h.0.as_str()); + header.push_str(h.1.to_str().unwrap()); + }); + + let user_agent = info.user_agent().unwrap(); + let host = info.host().unwrap().to_string(); + match info.status() { status if status == StatusCode::OK || status == StatusCode::NOT_FOUND => { trace!( @@ -127,6 +139,12 @@ pub fn slog_logging( "status" => status.to_string(), "path" => info.path(), "method" => info.method().to_string(), + "remote_port" => remote_port, +"remote_ip" => remote_ip, +"remote_addr" => remote_addr, +"header" => header, +"user_agent" => user_agent, +"host" => host, ); } }; From 7ca2adab501cc4f92a46233933d3be43e9678089 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 11:24:56 -0400 Subject: [PATCH 16/33] Revert log update --- beacon_node/http_api/src/lib.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 629a2de1f90..badfcdfd348 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -108,18 +108,6 @@ pub fn slog_logging( log: Logger, ) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> { warp::log::custom(move |info| { - let remote_port = info.remote_addr().unwrap().port(); - let remote_ip = info.remote_addr().unwrap().ip().to_string(); - let remote_addr = info.remote_addr().unwrap().to_string(); - let mut 
header = "".to_string(); - info.request_headers().into_iter().for_each(| h| { - header.push_str(h.0.as_str()); - header.push_str(h.1.to_str().unwrap()); - }); - - let user_agent = info.user_agent().unwrap(); - let host = info.host().unwrap().to_string(); - match info.status() { status if status == StatusCode::OK || status == StatusCode::NOT_FOUND => { trace!( @@ -139,12 +127,6 @@ pub fn slog_logging( "status" => status.to_string(), "path" => info.path(), "method" => info.method().to_string(), - "remote_port" => remote_port, -"remote_ip" => remote_ip, -"remote_addr" => remote_addr, -"header" => header, -"user_agent" => user_agent, -"host" => host, ); } }; From 0c2759d5d036b13ed09cd02e577ed7770c9e5690 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 11:43:14 -0400 Subject: [PATCH 17/33] Add drives info to the VC api --- common/eth2/src/lighthouse_vc/http_client.rs | 25 ++++++++++++ validator_client/src/http_api/mod.rs | 29 ++++++++++++++ validator_client/src/http_api/tests.rs | 41 ++++++++++++++++++-- 3 files changed, 91 insertions(+), 4 deletions(-) diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index b4ffce82913..f2c585eef57 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -197,6 +197,18 @@ impl ValidatorClientHttpClient { self.get(path).await } + /// `GET lighthouse/system` + pub async fn get_lighthouse_system(&self) -> Result<GenericResponse<Health>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("system"); + + self.get(path).await + } + /// `GET lighthouse/system/health` pub async fn get_lighthouse_system_health(&self) -> Result<GenericResponse<Health>, Error> { let mut path = self.server.clone(); @@ -210,6 +222,19 @@ impl ValidatorClientHttpClient { self.get(path).await } + /// `GET lighthouse/system/drives` + pub async fn get_lighthouse_system_drives(&self) -> Result<GenericResponse<Health>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("system") + .push("drives"); + + self.get(path).await + } + /// `GET lighthouse/spec` pub async fn get_lighthouse_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> { let mut path = self.server.clone(); diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 6961cfc745b..545eca7f227 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -153,6 +153,19 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( }) }); + // GET lighthouse/system + let get_lighthouse_system = warp::path("lighthouse") + .and(warp::path("system")) + .and(warp::path::end()) + .and(signer.clone()) + .and_then(|signer| { + blocking_signed_json_task(signer, move || { + eth2::lighthouse::System::observe() + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }); + // GET lighthouse/system/health let get_lighthouse_system_health = warp::path("lighthouse") .and(warp::path("system")) @@ -167,6 +180,20 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( }) }); + // GET lighthouse/system/drives + let get_lighthouse_system_drives = warp::path("lighthouse") + .and(warp::path("system")) + .and(warp::path("drives")) + .and(warp::path::end()) + .and(signer.clone()) + .and_then(|signer| { + blocking_signed_json_task(signer, move || { + eth2::lighthouse::Drive::observe() + 
.map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }); + // GET lighthouse/spec let get_lighthouse_spec = warp::path("lighthouse") .and(warp::path("spec")) @@ -413,7 +440,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .and( warp::get().and( get_node_version + .or(get_lighthouse_system) .or(get_lighthouse_system_health) + .or(get_lighthouse_system_drives) .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey), diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 17e6c575c66..3ce67d57368 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -155,19 +155,48 @@ impl ApiTester { self } - #[cfg(target_os = "linux")] + #[cfg(any(target_os = "linux", target_os = "macos"))] + pub async fn test_get_lighthouse_system(self) -> Self { + self.client.get_lighthouse_system().await.unwrap(); + + self + } + + #[cfg(any(target_os = "linux", target_os = "macos"))] pub async fn test_get_lighthouse_system_health(self) -> Self { self.client.get_lighthouse_system_health().await.unwrap(); self } - #[cfg(not(target_os = "linux"))] - pub async fn test_get_lighthouse_health(self) -> Self { - self.client.get_lighthouse_health().await.unwrap_err(); + #[cfg(any(target_os = "linux", target_os = "macos"))] + pub async fn test_get_lighthouse_system_drives(self) -> Self { + self.client.get_lighthouse_system_drives().await.unwrap(); + + self + } + + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub async fn test_get_lighthouse_system(self) -> Self { + self.client.get_lighthouse_system().await.unwrap_err(); + + self + } + + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub async fn test_get_lighthouse_system_health(self) -> Self { + self.client.get_lighthouse_system_health().await.unwrap_err(); + + self + } + + #[cfg(all(not(target_os = "linux"), not(target_os = 
"macos")))] + pub async fn test_get_lighthouse_system_drives(self) -> Self { + self.client.get_lighthouse_system_drives().await.unwrap_err(); self } + pub fn vals_total(&self) -> usize { self.initialized_validators.read().num_total() } @@ -433,8 +462,12 @@ async fn simple_getters() { .await .test_get_lighthouse_version() .await + .test_get_lighthouse_system() + .await .test_get_lighthouse_system_health() .await + .test_get_lighthouse_system_drives() + .await .test_get_lighthouse_spec() .await; } From c61008ac9be5929d4e2ee6126280b1c054a728dd Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 12:07:23 -0400 Subject: [PATCH 18/33] Add network stats to prometheus --- beacon_node/http_metrics/src/metrics.rs | 30 +++++++++++++++++++++++++ common/eth2/src/lighthouse.rs | 14 ++++++------ validator_client/src/http_api/tests.rs | 10 +++++++-- 3 files changed, 45 insertions(+), 9 deletions(-) diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 87a8c297593..82413bc031d 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -34,6 +34,30 @@ lazy_static! { try_create_float_gauge("system_loadavg_5", "Loadavg over 5 minutes"); pub static ref SYSTEM_LOADAVG_15: Result<Gauge> = try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes"); + pub static ref SYSTEM_RX_BYTES: Result<IntGauge> = try_create_int_gauge( + "rx_bytes", + "Total bytes received across all network interfaces." + ); + pub static ref SYSTEM_RX_ERRORS: Result<IntGauge> = try_create_int_gauge( + "rx_errors", + "Total errors received across all network interfaces." + ); + pub static ref SYSTEM_RX_PACKETS: Result<IntGauge> = try_create_int_gauge( + "rx_packets", + "Total packets received across all network interfaces." + ); + pub static ref SYSTEM_TX_BYTES: Result<IntGauge> = try_create_int_gauge( + "tx_bytes", + "Total bytes transmitted across all network interfaces." 
+ ); + pub static ref SYSTEM_TX_ERRORS: Result<IntGauge> = try_create_int_gauge( + "tx_errors", + "Total errors transmitted across all network interfaces." + ); + pub static ref SYSTEM_TX_PACKETS: Result<IntGauge> = try_create_int_gauge( + "tx_packets", + "Total packets transmitted across all network interfaces." + ); } pub fn gather_prometheus_metrics<T: BeaconChainTypes>( @@ -90,6 +114,12 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( set_float_gauge(&SYSTEM_LOADAVG_1, health.sys_loadavg_1); set_float_gauge(&SYSTEM_LOADAVG_5, health.sys_loadavg_5); set_float_gauge(&SYSTEM_LOADAVG_15, health.sys_loadavg_15); + set_gauge(&SYSTEM_RX_BYTES, health.network.rx_bytes as i64); + set_gauge(&SYSTEM_RX_ERRORS, health.network.rx_errors as i64); + set_gauge(&SYSTEM_RX_PACKETS, health.network.rx_packets as i64); + set_gauge(&SYSTEM_TX_BYTES, health.network.tx_bytes as i64); + set_gauge(&SYSTEM_TX_ERRORS, health.network.tx_errors as i64); + set_gauge(&SYSTEM_TX_PACKETS, health.network.tx_packets as i64); } encoder diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e5322e8149f..166ecc752c1 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -127,17 +127,17 @@ impl Drive { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Network { - /// Network metric for received bytes across all network interfaces. + /// Network metric for total received bytes across all network interfaces. pub rx_bytes: u64, - /// Network metric for received errors across all network interfaces. + /// Network metric for total received errors across all network interfaces. pub rx_errors: u64, - /// Network metric for received packets across all network interfaces. + /// Network metric for total received packets across all network interfaces. pub rx_packets: u64, - /// Network metric for transmitted bytes across all network interfaces. + /// Network metric for total transmitted bytes across all network interfaces. 
pub tx_bytes: u64, - /// Network metric for trasmitted errors across all network interfaces. + /// Network metric for total trasmitted errors across all network interfaces. pub tx_errors: u64, - /// Network metric for transmitted packets across all network interfaces. + /// Network metric for total transmitted packets across all network interfaces. pub tx_packets: u64, } @@ -166,7 +166,7 @@ pub struct Health { pub sys_loadavg_5: f64, /// System load average over 15 minutes. pub sys_loadavg_15: f64, - /// Network statistics. + /// Network statistics, totals across all network interfaces. pub network: Network, } diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 3ce67d57368..d82a0975f36 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -185,14 +185,20 @@ impl ApiTester { #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub async fn test_get_lighthouse_system_health(self) -> Self { - self.client.get_lighthouse_system_health().await.unwrap_err(); + self.client + .get_lighthouse_system_health() + .await + .unwrap_err(); self } #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub async fn test_get_lighthouse_system_drives(self) -> Self { - self.client.get_lighthouse_system_drives().await.unwrap_err(); + self.client + .get_lighthouse_system_drives() + .await + .unwrap_err(); self } From f32e8a030ab5f7db5679cc87de964fb5c03d39df Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 12:30:24 -0400 Subject: [PATCH 19/33] Refactor Network::observe, start updating docs --- book/src/api-lighthouse.md | 38 ++++++++------- book/src/api-vc-endpoints.md | 58 +++++++++++++++-------- common/eth2/src/lighthouse.rs | 89 +++++++++++++++-------------------- 3 files changed, 99 insertions(+), 86 deletions(-) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 0e4a16c6101..2c12a988a3d 100644 --- 
a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -16,30 +16,34 @@ Although we don't recommend that users rely on these endpoints, we document them briefly so they can be utilized by developers and researchers. -### `/lighthouse/health` +### `/lighthouse/system` -*Presently only available on Linux.* +```bash +curl -X GET "http://localhost:5052/lighthouse/system" -H "accept: application/json" | jq +``` + +```json + +``` + +### `/lighthouse/system/health` ```bash curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq ``` ```json -{ - "data": { - "pid": 1728254, - "pid_mem_resident_set_size": 510054400, - "pid_mem_virtual_memory_size": 3963158528, - "sys_virt_mem_total": 16715530240, - "sys_virt_mem_available": 4065374208, - "sys_virt_mem_used": 11383402496, - "sys_virt_mem_free": 1368662016, - "sys_virt_mem_percent": 75.67906, - "sys_loadavg_1": 4.92, - "sys_loadavg_5": 5.53, - "sys_loadavg_15": 5.58 - } -} + +``` + +### `/lighthouse/system/drives` + +```bash +curl -X GET "http://localhost:5052/lighthouse/drives" -H "accept: application/json" | jq +``` + +```json + ``` ### `/lighthouse/syncing` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 78ea493567b..512bd2108f8 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -36,7 +36,7 @@ Typical Responses | 200 } ``` -## `GET /lighthouse/health` +## `GET /lighthouse/system` Returns information regarding the health of the host machine. @@ -44,32 +44,52 @@ Returns information regarding the health of the host machine. | Property | Specification | | --- |--- | -Path | `/lighthouse/health` +Path | `/lighthouse/system` Method | GET Required Headers | [`Authorization`](./api-vc-auth-header.md) Typical Responses | 200 -*Note: this endpoint is presently only available on Linux.* +### Example Response Body + +```json + +``` + +## `GET /lighthouse/system/health` + +Returns information regarding the health of the host machine. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/system/health` +Method | GET +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 200 ### Example Response Body ```json -{ - "data": { - "pid": 1476293, - "pid_num_threads": 19, - "pid_mem_resident_set_size": 4009984, - "pid_mem_virtual_memory_size": 1306775552, - "sys_virt_mem_total": 33596100608, - "sys_virt_mem_available": 23073017856, - "sys_virt_mem_used": 9346957312, - "sys_virt_mem_free": 22410510336, - "sys_virt_mem_percent": 31.322334, - "sys_loadavg_1": 0.98, - "sys_loadavg_5": 0.98, - "sys_loadavg_15": 1.01 - } -} +``` + +## `GET /lighthouse/system/drives` + +Returns information regarding the health of the host machine. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/system/drives` +Method | GET +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 200 + +### Example Response Body + +```json + ``` ## `GET /lighthouse/spec` diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 166ecc752c1..42b7a9c8625 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -97,11 +97,17 @@ impl System { /// Reports information about a drive on the system the Lighthouse instance is running on. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Drive { + /// The filesystem name. pub filesystem: String, + /// The amount of disk space available on the filesystem. pub avail: u64, + /// The amount of disk space used on the filesystem. Equivalent to `total-avail`. pub used: u64, + /// The percentage of disk space used on the filesystem. Equivalent to `(total-avail) / total`. pub used_pct: u64, + /// The total amount of disk space on the filesystem. pub total: u64, + /// The filesystem mount point. 
pub mounted_on: String, } @@ -125,6 +131,7 @@ impl Drive { } } +/// Reports information about the network on the system the Lighthouse instance is running on. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Network { /// Network metric for total received bytes across all network interfaces. @@ -141,6 +148,36 @@ pub struct Network { pub tx_packets: u64, } +impl Network { + pub fn observe() -> Result<Self, String> { + let mut rx_bytes = 0; + let mut rx_errors = 0; + let mut rx_packets = 0; + let mut tx_bytes = 0; + let mut tx_errors = 0; + let mut tx_packets = 0; + + let s = SystemInfo::new_all(); + s.get_networks().iter().for_each(|(_, network)| { + rx_bytes += network.get_total_received(); + rx_errors += network.get_total_transmitted(); + rx_packets += network.get_total_packets_received(); + tx_bytes += network.get_total_packets_transmitted(); + tx_errors += network.get_total_errors_on_received(); + tx_packets += network.get_total_errors_on_transmitted(); + }); + + Ok(Network { + rx_bytes, + rx_errors, + rx_packets, + tx_bytes, + tx_errors, + tx_packets, + }) + } +} + /// Reports on the health of the Lighthouse instance. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Health { @@ -191,23 +228,6 @@ impl Health { let loadavg = psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - let mut rx_bytes = 0; - let mut rx_errors = 0; - let mut rx_packets = 0; - let mut tx_bytes = 0; - let mut tx_errors = 0; - let mut tx_packets = 0; - - let s = SystemInfo::new_all(); - s.get_networks().iter().for_each(|(_, network)| { - rx_bytes += network.get_total_received(); - rx_errors += network.get_total_transmitted(); - rx_packets += network.get_total_packets_received(); - tx_bytes += network.get_total_packets_transmitted(); - tx_errors += network.get_total_errors_on_received(); - tx_packets += network.get_total_errors_on_transmitted(); - }); - Ok(Self { pid: process.pid(), pid_mem_resident_set_size: process_mem.rss(), @@ -220,14 +240,7 @@ impl Health { sys_loadavg_1: loadavg.one, sys_loadavg_5: loadavg.five, sys_loadavg_15: loadavg.fifteen, - network: Network { - rx_bytes, - rx_errors, - rx_packets, - tx_bytes, - tx_errors, - tx_packets, - }, + network: Network::observe()?, }) } @@ -249,23 +262,6 @@ impl Health { .load_average() .map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - let mut rx_bytes = 0; - let mut rx_errors = 0; - let mut rx_packets = 0; - let mut tx_bytes = 0; - let mut tx_errors = 0; - let mut tx_packets = 0; - - let s = SystemInfo::new_all(); - s.get_networks().iter().for_each(|(_, network)| { - rx_bytes += network.get_total_received(); - rx_errors += network.get_total_transmitted(); - rx_packets += network.get_total_packets_received(); - tx_bytes += network.get_total_packets_transmitted(); - tx_errors += network.get_total_errors_on_received(); - tx_packets += network.get_total_errors_on_transmitted(); - }); - Ok(Self { pid: process.pid() as u32, pid_mem_resident_set_size: process_mem.rss(), @@ -278,14 +274,7 @@ impl Health { sys_loadavg_1: loadavg.one as f64, sys_loadavg_5: loadavg.five as f64, sys_loadavg_15: 
loadavg.fifteen as f64, - network: Network { - rx_bytes, - rx_errors, - rx_packets, - tx_bytes, - tx_errors, - tx_packets, - }, + network: Network::observe()?, }) } } From b170fbd3d3e5acc14c9acd33bcd2012d5393dde8 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 12:53:27 -0400 Subject: [PATCH 20/33] filter drives with zero total disk --- book/src/api-lighthouse.md | 24 +++++++++++++++++++++++- common/eth2/src/lighthouse.rs | 2 ++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 2c12a988a3d..4530998c03b 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -33,7 +33,29 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ``` ```json - +{ + "data": { + "pid": 92063, + "pid_mem_resident_set_size": 188657664, + "pid_mem_virtual_memory_size": 4852129792, + "sys_virt_mem_total": 17179869184, + "sys_virt_mem_available": 6760796160, + "sys_virt_mem_used": 8087846912, + "sys_virt_mem_free": 1614999552, + "sys_virt_mem_percent": 60.646988, + "sys_loadavg_1": 4.01708984375, + "sys_loadavg_5": 3.17626953125, + "sys_loadavg_15": 3.8837890625, + "network": { + "rx_bytes": 193094299648, + "rx_errors": 66251016192, + "rx_packets": 106043274, + "tx_bytes": 47319518, + "tx_errors": 0, + "tx_packets": 0 + } + } +} ``` ### `/lighthouse/system/drives` diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 42b7a9c8625..cea740499c2 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -118,6 +118,8 @@ impl Drive { .mounts() .expect("Could not find mounts.") .into_iter() + // filter out drives with zero total disk space + .filter(|drive|drive.total.as_u64() != 0) .map(|drive| Drive { filesystem: drive.fs_mounted_from, avail: drive.avail.as_u64(), From 2fac4e9ed135fdbb0d03fa0fc931265c625d5b31 Mon Sep 17 00:00:00 2001 From: realbigsean <seananderson33@gmail.com> Date: 
Fri, 2 Oct 2020 13:32:09 -0400 Subject: [PATCH 21/33] Update lighthouse book examples --- book/src/api-lighthouse.md | 111 +++++++++++++++++++++++++++++------ book/src/api-vc-endpoints.md | 104 +++++++++++++++++++++++++++++++- 2 files changed, 196 insertions(+), 19 deletions(-) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 4530998c03b..022f56cc2d0 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -23,7 +23,57 @@ curl -X GET "http://localhost:5052/lighthouse/system" -H "accept: application/j ``` ```json - +{ + "data": { + "health": { + "pid": 11612, + "pid_mem_resident_set_size": 170893312, + "pid_mem_virtual_memory_size": 1401901056, + "sys_virt_mem_total": 8363692032, + "sys_virt_mem_available": 5679951872, + "sys_virt_mem_used": 2435825664, + "sys_virt_mem_free": 2547994624, + "sys_virt_mem_percent": 32.087982, + "sys_loadavg_1": 0.24, + "sys_loadavg_5": 0.55, + "sys_loadavg_15": 1.42, + "network": { + "rx_bytes": 1333660554356, + "rx_errors": 824206201966, + "rx_packets": 2565207513, + "tx_bytes": 3048133285, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "drives": [ + { + "filesystem": "udev", + "avail": 4168499200, + "used": 0, + "used_pct": 0, + "total": 4168499200, + "mounted_on": "/dev" + }, + { + "filesystem": "/dev/vda1", + "avail": 91556573184, + "used": 74761998336, + "used_pct": 44, + "total": 166318571520, + "mounted_on": "/" + }, + { + "filesystem": "/dev/vda15", + "avail": 105666560, + "used": 3756032, + "used_pct": 3, + "total": 109422592, + "mounted_on": "/boot/efi" + } + ] + } +} ``` ### `/lighthouse/system/health` @@ -35,22 +85,22 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ```json { "data": { - "pid": 92063, - "pid_mem_resident_set_size": 188657664, - "pid_mem_virtual_memory_size": 4852129792, - "sys_virt_mem_total": 17179869184, - "sys_virt_mem_available": 6760796160, - "sys_virt_mem_used": 8087846912, - "sys_virt_mem_free": 1614999552, - 
"sys_virt_mem_percent": 60.646988, - "sys_loadavg_1": 4.01708984375, - "sys_loadavg_5": 3.17626953125, - "sys_loadavg_15": 3.8837890625, + "pid": 11612, + "pid_mem_resident_set_size": 396988416, + "pid_mem_virtual_memory_size": 1902653440, + "sys_virt_mem_total": 8363692032, + "sys_virt_mem_available": 5458038784, + "sys_virt_mem_used": 2656464896, + "sys_virt_mem_free": 2229014528, + "sys_virt_mem_percent": 34.741276, + "sys_loadavg_1": 2.54, + "sys_loadavg_5": 1.61, + "sys_loadavg_15": 1.64, "network": { - "rx_bytes": 193094299648, - "rx_errors": 66251016192, - "rx_packets": 106043274, - "tx_bytes": 47319518, + "rx_bytes": 1333721410240, + "rx_errors": 824208688988, + "rx_packets": 2565265020, + "tx_bytes": 3048160193, "tx_errors": 0, "tx_packets": 0 } @@ -65,7 +115,34 @@ curl -X GET "http://localhost:5052/lighthouse/drives" -H "accept: application/j ``` ```json - +{ + "data": [ + { + "filesystem": "udev", + "avail": 4168499200, + "used": 0, + "used_pct": 0, + "total": 4168499200, + "mounted_on": "/dev" + }, + { + "filesystem": "/dev/vda1", + "avail": 91473604608, + "used": 74844966912, + "used_pct": 45, + "total": 166318571520, + "mounted_on": "/" + }, + { + "filesystem": "/dev/vda15", + "avail": 105666560, + "used": 3756032, + "used_pct": 3, + "total": 109422592, + "mounted_on": "/boot/efi" + } + ] +} ``` ### `/lighthouse/syncing` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 512bd2108f8..96abc92c2c8 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -52,7 +52,57 @@ Typical Responses | 200 ### Example Response Body ```json - +{ + "data": { + "health": { + "pid": 11653, + "pid_mem_resident_set_size": 170893312, + "pid_mem_virtual_memory_size": 1401901056, + "sys_virt_mem_total": 8363692032, + "sys_virt_mem_available": 5679951872, + "sys_virt_mem_used": 2435825664, + "sys_virt_mem_free": 2547994624, + "sys_virt_mem_percent": 32.087982, + "sys_loadavg_1": 0.24, + "sys_loadavg_5": 0.55, + 
"sys_loadavg_15": 1.42, + "network": { + "rx_bytes": 1333660554356, + "rx_errors": 824206201966, + "rx_packets": 2565207513, + "tx_bytes": 3048133285, + "tx_errors": 0, + "tx_packets": 0 + } + }, + "drives": [ + { + "filesystem": "udev", + "avail": 4168499200, + "used": 0, + "used_pct": 0, + "total": 4168499200, + "mounted_on": "/dev" + }, + { + "filesystem": "/dev/vda1", + "avail": 91556573184, + "used": 74761998336, + "used_pct": 44, + "total": 166318571520, + "mounted_on": "/" + }, + { + "filesystem": "/dev/vda15", + "avail": 105666560, + "used": 3756032, + "used_pct": 3, + "total": 109422592, + "mounted_on": "/boot/efi" + } + ] + } +} ``` ## `GET /lighthouse/system/health` @@ -71,6 +121,29 @@ Typical Responses | 200 ### Example Response Body ```json +{ + "data": { + "pid": 11653, + "pid_mem_resident_set_size": 396988416, + "pid_mem_virtual_memory_size": 1902653440, + "sys_virt_mem_total": 8363692032, + "sys_virt_mem_available": 5458038784, + "sys_virt_mem_used": 2656464896, + "sys_virt_mem_free": 2229014528, + "sys_virt_mem_percent": 34.741276, + "sys_loadavg_1": 2.54, + "sys_loadavg_5": 1.61, + "sys_loadavg_15": 1.64, + "network": { + "rx_bytes": 1333721410240, + "rx_errors": 824208688988, + "rx_packets": 2565265020, + "tx_bytes": 3048160193, + "tx_errors": 0, + "tx_packets": 0 + } + } +} ``` ## `GET /lighthouse/system/drives` @@ -89,7 +162,34 @@ Typical Responses | 200 ### Example Response Body ```json - +{ + "data": [ + { + "filesystem": "udev", + "avail": 4168499200, + "used": 0, + "used_pct": 0, + "total": 4168499200, + "mounted_on": "/dev" + }, + { + "filesystem": "/dev/vda1", + "avail": 91473604608, + "used": 74844966912, + "used_pct": 45, + "total": 166318571520, + "mounted_on": "/" + }, + { + "filesystem": "/dev/vda15", + "avail": 105666560, + "used": 3756032, + "used_pct": 3, + "total": 109422592, + "mounted_on": "/boot/efi" + } + ] +} ``` ## `GET /lighthouse/spec` From 79f27c8e939cfa816e970dab2f774f304c318d3b Mon Sep 17 00:00:00 2001 From: 
realbigsean <seananderson33@gmail.com> Date: Fri, 2 Oct 2020 13:47:42 -0400 Subject: [PATCH 22/33] fix vc api tests --- common/eth2/src/lighthouse.rs | 2 +- common/eth2/src/lighthouse_vc/http_client.rs | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index cea740499c2..bb7d85c99ee 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -119,7 +119,7 @@ impl Drive { .expect("Could not find mounts.") .into_iter() // filter out drives with zero total disk space - .filter(|drive|drive.total.as_u64() != 0) + .filter(|drive| drive.total.as_u64() != 0) .map(|drive| Drive { filesystem: drive.fs_mounted_from, avail: drive.avail.as_u64(), diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index f2c585eef57..95df3d2bdb5 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -10,6 +10,7 @@ use ring::digest::{digest, SHA256}; use secp256k1::{Message, PublicKey, Signature}; use serde::{de::DeserializeOwned, Serialize}; +use crate::lighthouse::{Drive, System}; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; @@ -198,7 +199,7 @@ impl ValidatorClientHttpClient { } /// `GET lighthouse/system` - pub async fn get_lighthouse_system(&self) -> Result<GenericResponse<Health>, Error> { + pub async fn get_lighthouse_system(&self) -> Result<GenericResponse<System>, Error> { let mut path = self.server.clone(); path.path_segments_mut() @@ -223,7 +224,7 @@ impl ValidatorClientHttpClient { } /// `GET lighthouse/system/drives` - pub async fn get_lighthouse_system_drives(&self) -> Result<GenericResponse<Health>, Error> { + pub async fn get_lighthouse_system_drives(&self) -> Result<GenericResponse<Vec<Drive>>, Error> { let mut path = self.server.clone(); path.path_segments_mut() From fd5cc659296db7a81c85234b911787431e8653b3 Mon Sep 17 00:00:00 2001 From: realbigsean 
<seananderson33@gmail.com> Date: Fri, 2 Oct 2020 13:53:59 -0400 Subject: [PATCH 23/33] update docs --- book/src/api-lighthouse.md | 2 ++ book/src/api-vc-endpoints.md | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 022f56cc2d0..488e019e7e7 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -18,6 +18,8 @@ researchers. ### `/lighthouse/system` +*Available on Linux and macOS.* + ```bash curl -X GET "http://localhost:5052/lighthouse/system" -H "accept: application/json" | jq ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 96abc92c2c8..936fe131862 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -38,7 +38,9 @@ Typical Responses | 200 ## `GET /lighthouse/system` -Returns information regarding the health of the host machine. +*Available on Linux and macOS.* + +Returns system statistics and information about the host machine. ### HTTP Specification @@ -148,7 +150,7 @@ Typical Responses | 200 ## `GET /lighthouse/system/drives` -Returns information regarding the health of the host machine. +Returns information regarding drives on the host machine. 
### HTTP Specification From c4b8bcdf892dbd0393b1f68ea8ffacf3490486a3 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 2 Nov 2020 17:06:20 +1100 Subject: [PATCH 24/33] Add MountInfo --- common/eth2/src/lighthouse.rs | 76 +++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index d77df9690d5..6b42bdd57e3 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -9,6 +9,7 @@ use proto_array::core::ProtoArray; use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; use ssz::Decode; +use std::path::{Path, PathBuf}; use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; use systemstat::{Platform, System as SystemStat}; @@ -97,6 +98,73 @@ impl System { } } +/// Contains information about a file system mount. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MountInfo { + avail: u64, + total: u64, + used: u64, + used_pct: f64, + mounted_on: PathBuf, +} + +impl MountInfo { + /// Attempts to find the `MountInfo` for the given `path`. + pub fn for_path<P: AsRef<Path>>(path: P) -> Result<Option<Self>, String> { + let system = SystemStat::new(); + let mounts = system + .mounts() + .map_err(|e| format!("Unable to enumerate mounts: {:?}", e))?; + + let mut mounts = mounts + .iter() + .filter_map(|drive| { + let mount_path = Path::new(&drive.fs_mounted_on); + let num_components = mount_path.iter().count(); + + Some((drive, mount_path, num_components)) + .filter(|_| path.as_ref().starts_with(&mount_path)) + }) + .collect::<Vec<_>>(); + + // Sort the list of mount points, such that the path with the most components is first. + // + // For example: + // + // ``` + // let mounts = ["/home/paul", "/home", "/"]; + // ``` + // + // The intention here is to find the "closest" mount-point to `path`, such that + // `/home/paul/file` matches `/home/paul`, not `/` or `/home`. 
+ mounts.sort_unstable_by(|(_, _, a), (_, _, b)| b.cmp(a)); + + let disk_usage = mounts.first().map(|(drive, mount_path, _)| { + let avail = drive.avail.as_u64(); + let total = drive.total.as_u64(); + let used = total.saturating_sub(avail); + let mut used_pct = if total > 0 { + used as f64 / total as f64 + } else { + 0.0 + } * 100.0; + + // Round to two decimals. + used_pct = (used_pct * 100.00).round() / 100.00; + + Self { + avail, + total, + used, + used_pct, + mounted_on: mount_path.into(), + } + }); + + Ok(disk_usage) + } +} + /// Reports information about a drive on the system the Lighthouse instance is running on. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Drive { @@ -210,6 +278,10 @@ pub struct Health { pub sys_loadavg_15: f64, /// Network statistics, totals across all network interfaces. pub network: Network, + /// Filesystem information. + pub chain_database: Option<MountInfo>, + /// Filesystem information. + pub freezer_database: Option<MountInfo>, } impl Health { @@ -246,6 +318,10 @@ impl Health { sys_loadavg_5: loadavg.five, sys_loadavg_15: loadavg.fifteen, network: Network::observe()?, + chain_database: MountInfo::for_path("/home/paul/.lighthouse/medalla/beacon/chain_db")?, + freezer_database: MountInfo::for_path( + "/home/paul/.lighthouse/medalla/beacon/freezer_db", + )?, }) } From 38f6e61ce5db11e9a98403270ba270e4ae8a6bf2 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Mon, 2 Nov 2020 18:37:40 +1100 Subject: [PATCH 25/33] Remove system endpoints --- beacon_node/client/src/builder.rs | 17 +++- beacon_node/http_api/src/lib.rs | 42 +++------- beacon_node/http_api/tests/tests.rs | 1 + common/eth2/src/lighthouse.rs | 85 +------------------- common/eth2/src/lighthouse_vc/http_client.rs | 31 +------ validator_client/src/http_api/mod.rs | 36 +-------- 6 files changed, 35 insertions(+), 177 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 
ec039aeaedd..3c33f61d0c2 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -326,6 +326,18 @@ where .runtime_context .as_ref() .ok_or_else(|| "build requires a runtime context".to_string())?; + let chain_db_path = self + .db_path + .ok_or_else(|| "build requires a chain db path context".to_string())?; + let freezer_db_path = self + .freezer_db_path + .ok_or_else(|| "build requires a freezer db path".to_string())?; + + let db_paths = http_api::DBPaths { + chain_db: chain_db_path, + freezer_db: freezer_db_path, + }; + let log = runtime_context.log().clone(); let http_api_listen_addr = if self.http_api_config.enabled { @@ -334,6 +346,7 @@ where chain: self.beacon_chain.clone(), network_tx: self.network_send.clone(), network_globals: self.network_globals.clone(), + db_paths: Some(db_paths.clone()), log: log.clone(), }); @@ -357,8 +370,8 @@ where let ctx = Arc::new(http_metrics::Context { config: self.http_metrics_config.clone(), chain: self.beacon_chain.clone(), - db_path: self.db_path.clone(), - freezer_db_path: self.freezer_db_path.clone(), + db_path: Some(db_paths.chain_db), + freezer_db_path: Some(db_paths.freezer_db), log: log.clone(), }); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0e9bb3d868a..c0afb953565 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -35,6 +35,7 @@ use std::borrow::Cow; use std::convert::TryInto; use std::future::Future; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::path::PathBuf; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use types::{ @@ -55,6 +56,12 @@ const API_VERSION: &str = "v1"; /// finalized head. const SYNC_TOLERANCE_EPOCHS: u64 = 8; +#[derive(Debug, Clone, PartialEq)] +pub struct DBPaths { + pub chain_db: PathBuf, + pub freezer_db: PathBuf, +} + /// A wrapper around all the items required to spawn the HTTP server. 
/// /// The server will gracefully handle the case where any fields are `None`. @@ -63,6 +70,7 @@ pub struct Context<T: BeaconChainTypes> { pub chain: Option<Arc<BeaconChain<T>>>, pub network_tx: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, + pub db_paths: Option<DBPaths>, pub log: Logger, } @@ -1699,21 +1707,8 @@ pub fn serve<T: BeaconChainTypes>( }, ); - // GET lighthouse/system - let get_lighthouse_system = warp::path("lighthouse") - .and(warp::path("system")) - .and(warp::path::end()) - .and_then(|| { - blocking_json_task(move || { - eth2::lighthouse::System::observe() - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::custom_bad_request) - }) - }); - - // GET lighthouse/system/health - let get_lighthouse_system_health = warp::path("lighthouse") - .and(warp::path("system")) + // GET lighthouse/health + let get_lighthouse_health = warp::path("lighthouse") .and(warp::path("health")) .and(warp::path::end()) .and_then(|| { @@ -1724,19 +1719,6 @@ pub fn serve<T: BeaconChainTypes>( }) }); - // GET lighthouse/system/drives - let get_lighthouse_system_drives = warp::path("lighthouse") - .and(warp::path("system")) - .and(warp::path("drives")) - .and(warp::path::end()) - .and_then(|| { - blocking_json_task(move || { - eth2::lighthouse::Drive::observe() - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::custom_bad_request) - }) - }); - // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -1891,9 +1873,7 @@ pub fn serve<T: BeaconChainTypes>( .or(get_validator_blocks.boxed()) .or(get_validator_attestation_data.boxed()) .or(get_validator_aggregate_attestation.boxed()) - .or(get_lighthouse_system.boxed()) - .or(get_lighthouse_system_health.boxed()) - .or(get_lighthouse_system_drives.boxed()) + .or(get_lighthouse_health.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_peers.boxed()) 
.or(get_lighthouse_peers_connected.boxed()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index fca6643b54e..2f1e38be8d7 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -177,6 +177,7 @@ impl ApiTester { chain: Some(chain.clone()), network_tx: Some(network_tx), network_globals: Some(Arc::new(network_globals)), + db_paths: None, log, }); let ctx = context.clone(); diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 6b42bdd57e3..2de2443e974 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -82,22 +82,6 @@ use psutil::process::Process; #[cfg(target_os = "linux")] use psutil::process::Process; -/// Reports information about the system the Lighthouse instance is running on. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct System { - pub health: Health, - pub drives: Vec<Drive>, -} - -impl System { - pub fn observe() -> Result<Self, String> { - Ok(Self { - health: Health::observe()?, - drives: Drive::observe()?, - }) - } -} - /// Contains information about a file system mount. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MountInfo { @@ -165,45 +149,6 @@ impl MountInfo { } } -/// Reports information about a drive on the system the Lighthouse instance is running on. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Drive { - /// The filesystem name. - pub filesystem: String, - /// The amount of disk space available on the filesystem. - pub avail: u64, - /// The amount of disk space used on the filesystem. Equivalent to `total-avail`. - pub used: u64, - /// The percentage of disk space used on the filesystem. Equivalent to `(total-avail) / total`. - pub used_pct: u64, - /// The total amount of disk space on the filesystem. - pub total: u64, - /// The filesystem mount point. 
- pub mounted_on: String, -} - -impl Drive { - pub fn observe() -> Result<Vec<Self>, String> { - let system = SystemStat::new(); - Ok(system - .mounts() - .expect("Could not find mounts.") - .into_iter() - // filter out drives with zero total disk space - .filter(|drive| drive.total.as_u64() != 0) - .map(|drive| Drive { - filesystem: drive.fs_mounted_from, - avail: drive.avail.as_u64(), - used: drive.total.as_u64() - drive.avail.as_u64(), - used_pct: (((drive.total.0 as f64 - drive.avail.0 as f64) / drive.total.0 as f64) - * 100.0) as u64, - total: drive.total.as_u64(), - mounted_on: drive.fs_mounted_on, - }) - .collect()) - } -} - /// Reports information about the network on the system the Lighthouse instance is running on. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Network { @@ -382,44 +327,18 @@ impl BeaconNodeHttpClient { } } - /// `GET lighthouse/system` - pub async fn get_lighthouse_system(&self) -> Result<GenericResponse<System>, Error> { + /// `GET lighthouse/health` + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { let mut path = self.server.clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("lighthouse") - .push("system"); - - self.get(path).await - } - - /// `GET lighthouse/system/health` - pub async fn get_lighthouse_system_health(&self) -> Result<GenericResponse<Health>, Error> { - let mut path = self.server.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("system") .push("health"); self.get(path).await } - /// `GET lighthouse/system/drives` - pub async fn get_lighthouse_system_drives(&self) -> Result<GenericResponse<Vec<Drive>>, Error> { - let mut path = self.server.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("system") - .push("drives"); - - self.get(path).await - } - /// `GET lighthouse/syncing` pub async fn get_lighthouse_syncing(&self) -> Result<GenericResponse<SyncState>, Error> { let mut path = self.server.clone(); diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 95df3d2bdb5..f3201b5eeed 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -10,7 +10,7 @@ use ring::digest::{digest, SHA256}; use secp256k1::{Message, PublicKey, Signature}; use serde::{de::DeserializeOwned, Serialize}; -use crate::lighthouse::{Drive, System}; +use crate::lighthouse::Health; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; @@ -198,20 +198,8 @@ impl ValidatorClientHttpClient { self.get(path).await } - /// `GET lighthouse/system` - pub async fn get_lighthouse_system(&self) -> Result<GenericResponse<System>, Error> { - let mut path = self.server.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("system"); - - self.get(path).await - } - - /// `GET lighthouse/system/health` - pub async fn get_lighthouse_system_health(&self) -> Result<GenericResponse<Health>, Error> { + /// `GET lighthouse/health` + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { let mut path = self.server.clone(); path.path_segments_mut() @@ -223,19 +211,6 @@ impl ValidatorClientHttpClient { self.get(path).await } - /// `GET lighthouse/system/drives` - pub async fn get_lighthouse_system_drives(&self) -> Result<GenericResponse<Vec<Drive>>, Error> { - let mut path = self.server.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("system") - .push("drives"); - - self.get(path).await - } - /// `GET lighthouse/spec` pub async fn get_lighthouse_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> { let mut path = self.server.clone(); diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 7099a4842bb..6dde635c945 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -165,22 +165,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( }) }); - // GET lighthouse/system - let get_lighthouse_system = warp::path("lighthouse") - .and(warp::path("system")) - .and(warp::path::end()) - .and(signer.clone()) - .and_then(|signer| { - blocking_signed_json_task(signer, move || { - eth2::lighthouse::System::observe() - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::custom_bad_request) - }) - }); - - // GET lighthouse/system/health - let get_lighthouse_system_health = warp::path("lighthouse") - .and(warp::path("system")) + // GET lighthouse/health + let get_lighthouse_health = warp::path("lighthouse") .and(warp::path("health")) .and(warp::path::end()) .and(signer.clone()) @@ -192,20 +178,6 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( }) }); - // GET lighthouse/system/drives - let get_lighthouse_system_drives = warp::path("lighthouse") - .and(warp::path("system")) - .and(warp::path("drives")) - .and(warp::path::end()) - .and(signer.clone()) - .and_then(|signer| { - blocking_signed_json_task(signer, move || { - eth2::lighthouse::Drive::observe() - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::custom_bad_request) - }) - }); - // GET lighthouse/spec let get_lighthouse_spec = warp::path("lighthouse") .and(warp::path("spec")) @@ -452,9 +424,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .and( warp::get().and( get_node_version - .or(get_lighthouse_system) - .or(get_lighthouse_system_health) - 
.or(get_lighthouse_system_drives) + .or(get_lighthouse_health) .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey), From 7e180d0ba3d36202dfe642d3eab548c92b11a240 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 3 Nov 2020 10:13:48 +1100 Subject: [PATCH 26/33] Add dbs for mac --- common/eth2/src/lighthouse.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 2de2443e974..f1f7d2bd84d 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -301,6 +301,10 @@ impl Health { sys_loadavg_5: loadavg.five as f64, sys_loadavg_15: loadavg.fifteen as f64, network: Network::observe()?, + chain_database: MountInfo::for_path("/home/paul/.lighthouse/medalla/beacon/chain_db")?, + freezer_database: MountInfo::for_path( + "/home/paul/.lighthouse/medalla/beacon/freezer_db", + )?, }) } } From f6dea228752ed525a10d966784cb40646819e895 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 3 Nov 2020 10:56:21 +1100 Subject: [PATCH 27/33] Split apart health endpoints --- Cargo.lock | 1 + beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/builder.rs | 3 +- beacon_node/http_api/src/lib.rs | 27 ++++--- beacon_node/http_metrics/src/lib.rs | 5 +- beacon_node/http_metrics/src/metrics.rs | 77 ++++++++++++-------- common/eth2/src/lighthouse.rs | 67 ++++++++++++----- common/eth2/src/lighthouse_vc/http_client.rs | 4 +- common/eth2/src/lighthouse_vc/types.rs | 2 +- validator_client/src/http_api/mod.rs | 2 +- 10 files changed, 121 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 774f1e599c8..fff2539e2dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -854,6 +854,7 @@ dependencies = [ "environment", "error-chain", "eth1", + "eth2", "eth2_config", "eth2_libp2p", "eth2_ssz", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8114761aaed..bfd79b9b534 100644 --- 
a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -44,3 +44,4 @@ bus = "2.2.3" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } +eth2 = {path = "../../common/eth2"} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3c33f61d0c2..58b06af6c3e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -370,8 +370,7 @@ where let ctx = Arc::new(http_metrics::Context { config: self.http_metrics_config.clone(), chain: self.beacon_chain.clone(), - db_path: Some(db_paths.chain_db), - freezer_db_path: Some(db_paths.freezer_db), + db_paths: Some(db_paths), log: log.clone(), }); diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index c0afb953565..5935963dbbf 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -35,7 +35,6 @@ use std::borrow::Cow; use std::convert::TryInto; use std::future::Future; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; -use std::path::PathBuf; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use types::{ @@ -46,6 +45,8 @@ use types::{ use warp::{http::Response, Filter}; use warp_utils::task::{blocking_json_task, blocking_task}; +pub use eth2::lighthouse::DBPaths; + const API_PREFIX: &str = "eth"; const API_VERSION: &str = "v1"; @@ -56,12 +57,6 @@ const API_VERSION: &str = "v1"; /// finalized head. const SYNC_TOLERANCE_EPOCHS: u64 = 8; -#[derive(Debug, Clone, PartialEq)] -pub struct DBPaths { - pub chain_db: PathBuf, - pub freezer_db: PathBuf, -} - /// A wrapper around all the items required to spawn the HTTP server. /// /// The server will gracefully handle the case where any fields are `None`. @@ -308,6 +303,19 @@ pub fn serve<T: BeaconChainTypes>( } }); + // Create a `warp` filter that provides access to the database paths. 
+ let inner_ctx = ctx.clone(); + let db_paths_filter = warp::any() + .map(move || inner_ctx.db_paths.clone()) + .and_then(|db_paths| async move { + match db_paths { + Some(db_paths) => Ok(db_paths), + None => Err(warp_utils::reject::custom_not_found( + "The database paths are unknown.".to_string(), + )), + } + }); + // Create a `warp` filter that rejects request whilst the node is syncing. let not_while_syncing_filter = warp::any() .and(network_globals.clone()) @@ -1711,9 +1719,10 @@ pub fn serve<T: BeaconChainTypes>( let get_lighthouse_health = warp::path("lighthouse") .and(warp::path("health")) .and(warp::path::end()) - .and_then(|| { + .and(db_paths_filter) + .and_then(|db_paths| { blocking_json_task(move || { - eth2::lighthouse::Health::observe() + eth2::lighthouse::BeaconHealth::observe(&db_paths) .map(api_types::GenericResponse::from) .map_err(warp_utils::reject::custom_bad_request) }) diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index ce59578b9e4..8041c6b3a3e 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -8,12 +8,12 @@ extern crate lazy_static; mod metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::lighthouse::DBPaths; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; use slog::{crit, info, Logger}; use std::future::Future; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; -use std::path::PathBuf; use std::sync::Arc; use warp::{http::Response, Filter}; @@ -41,8 +41,7 @@ impl From<String> for Error { pub struct Context<T: BeaconChainTypes> { pub config: Config, pub chain: Option<Arc<BeaconChain<T>>>, - pub db_path: Option<PathBuf>, - pub freezer_db_path: Option<PathBuf>, + pub db_paths: Option<DBPaths>, pub log: Logger, } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 82413bc031d..4ef1765c468 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ 
b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,6 @@ use crate::Context; use beacon_chain::BeaconChainTypes; -use eth2::lighthouse::Health; +use eth2::lighthouse::BeaconHealth; use lighthouse_metrics::{Encoder, TextEncoder}; pub use lighthouse_metrics::*; @@ -87,41 +87,54 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( beacon_chain::scrape_for_metrics(beacon_chain); } - if let (Some(db_path), Some(freezer_db_path)) = - (ctx.db_path.as_ref(), ctx.freezer_db_path.as_ref()) - { - store::scrape_for_metrics(db_path, freezer_db_path); + if let Some(db_paths) = ctx.db_paths.as_ref() { + store::scrape_for_metrics(&db_paths.chain_db, &db_paths.freezer_db); + + // This will silently fail if we are unable to observe the health. This is desired behaviour + // since we don't support `BeaconHealth` for all platforms. + if let Ok(health) = BeaconHealth::observe(db_paths) { + set_gauge( + &PROCESS_RES_MEM, + health.common.pid_mem_resident_set_size as i64, + ); + set_gauge( + &PROCESS_VIRT_MEM, + health.common.pid_mem_virtual_memory_size as i64, + ); + set_gauge( + &SYSTEM_VIRT_MEM_TOTAL, + health.common.sys_virt_mem_total as i64, + ); + set_gauge( + &SYSTEM_VIRT_MEM_AVAILABLE, + health.common.sys_virt_mem_available as i64, + ); + set_gauge( + &SYSTEM_VIRT_MEM_USED, + health.common.sys_virt_mem_used as i64, + ); + set_gauge( + &SYSTEM_VIRT_MEM_FREE, + health.common.sys_virt_mem_free as i64, + ); + set_float_gauge( + &SYSTEM_VIRT_MEM_PERCENTAGE, + health.common.sys_virt_mem_percent as f64, + ); + set_float_gauge(&SYSTEM_LOADAVG_1, health.common.sys_loadavg_1); + set_float_gauge(&SYSTEM_LOADAVG_5, health.common.sys_loadavg_5); + set_float_gauge(&SYSTEM_LOADAVG_15, health.common.sys_loadavg_15); + set_gauge(&SYSTEM_RX_BYTES, health.network.rx_bytes as i64); + set_gauge(&SYSTEM_RX_ERRORS, health.network.rx_errors as i64); + set_gauge(&SYSTEM_RX_PACKETS, health.network.rx_packets as i64); + set_gauge(&SYSTEM_TX_BYTES, health.network.tx_bytes as i64); + 
set_gauge(&SYSTEM_TX_ERRORS, health.network.tx_errors as i64); + set_gauge(&SYSTEM_TX_PACKETS, health.network.tx_packets as i64); + } } eth2_libp2p::scrape_discovery_metrics(); - // This will silently fail if we are unable to observe the health. This is desired behaviour - // since we don't support `Health` for all platforms. - if let Ok(health) = Health::observe() { - set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); - set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); - set_gauge(&SYSTEM_VIRT_MEM_TOTAL, health.sys_virt_mem_total as i64); - set_gauge( - &SYSTEM_VIRT_MEM_AVAILABLE, - health.sys_virt_mem_available as i64, - ); - set_gauge(&SYSTEM_VIRT_MEM_USED, health.sys_virt_mem_used as i64); - set_gauge(&SYSTEM_VIRT_MEM_FREE, health.sys_virt_mem_free as i64); - set_float_gauge( - &SYSTEM_VIRT_MEM_PERCENTAGE, - health.sys_virt_mem_percent as f64, - ); - set_float_gauge(&SYSTEM_LOADAVG_1, health.sys_loadavg_1); - set_float_gauge(&SYSTEM_LOADAVG_5, health.sys_loadavg_5); - set_float_gauge(&SYSTEM_LOADAVG_15, health.sys_loadavg_15); - set_gauge(&SYSTEM_RX_BYTES, health.network.rx_bytes as i64); - set_gauge(&SYSTEM_RX_ERRORS, health.network.rx_errors as i64); - set_gauge(&SYSTEM_RX_PACKETS, health.network.rx_packets as i64); - set_gauge(&SYSTEM_TX_BYTES, health.network.tx_bytes as i64); - set_gauge(&SYSTEM_TX_ERRORS, health.network.tx_errors as i64); - set_gauge(&SYSTEM_TX_PACKETS, health.network.tx_packets as i64); - } - encoder .encode(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index f1f7d2bd84d..c005100cd73 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -15,6 +15,13 @@ use systemstat::{Platform, System as SystemStat}; pub use eth2_libp2p::{types::SyncState, PeerInfo}; +/// The two paths to the two core Lighthouse databases. 
+#[derive(Debug, Clone, PartialEq)] +pub struct DBPaths { + pub chain_db: PathBuf, + pub freezer_db: PathBuf, +} + /// Information returned by `peers` and `connected_peers`. // TODO: this should be deserializable.. #[derive(Debug, Clone, Serialize)] @@ -198,7 +205,7 @@ impl Network { /// Reports on the health of the Lighthouse instance. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Health { +pub struct CommonHealth { /// The pid of this process. pub pid: u32, /// The total resident memory used by this pid. @@ -221,15 +228,9 @@ pub struct Health { pub sys_loadavg_5: f64, /// System load average over 15 minutes. pub sys_loadavg_15: f64, - /// Network statistics, totals across all network interfaces. - pub network: Network, - /// Filesystem information. - pub chain_database: Option<MountInfo>, - /// Filesystem information. - pub freezer_database: Option<MountInfo>, } -impl Health { +impl CommonHealth { #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] pub fn observe() -> Result<Self, String> { Err("Health is only available on Linux and MacOS".into()) @@ -262,11 +263,6 @@ impl Health { sys_loadavg_1: loadavg.one, sys_loadavg_5: loadavg.five, sys_loadavg_15: loadavg.fifteen, - network: Network::observe()?, - chain_database: MountInfo::for_path("/home/paul/.lighthouse/medalla/beacon/chain_db")?, - freezer_database: MountInfo::for_path( - "/home/paul/.lighthouse/medalla/beacon/freezer_db", - )?, }) } @@ -300,11 +296,46 @@ impl Health { sys_loadavg_1: loadavg.one as f64, sys_loadavg_5: loadavg.five as f64, sys_loadavg_15: loadavg.fifteen as f64, + }) + } +} + +/// Reports on the health of the Lighthouse instance. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BeaconHealth { + #[serde(flatten)] + pub common: CommonHealth, + /// Network statistics, totals across all network interfaces. + pub network: Network, + /// Filesystem information. + pub chain_database: Option<MountInfo>, + /// Filesystem information. 
+ pub freezer_database: Option<MountInfo>, +} + +impl BeaconHealth { + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub fn observe() -> Result<Self, String> { + Err("Health is only available on Linux and MacOS".into()) + } + + #[cfg(target_os = "linux")] + pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + Ok(Self { + common: CommonHealth::observe()?, + network: Network::observe()?, + chain_database: MountInfo::for_path(&db_paths.chain_db)?, + freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, + }) + } + + #[cfg(target_os = "macos")] + pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + Ok(Self { + common: CommonHealth::observe()?, network: Network::observe()?, - chain_database: MountInfo::for_path("/home/paul/.lighthouse/medalla/beacon/chain_db")?, - freezer_database: MountInfo::for_path( - "/home/paul/.lighthouse/medalla/beacon/freezer_db", - )?, + chain_database: MountInfo::for_path(&db_paths.chain_db)?, + freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, }) } } @@ -332,7 +363,7 @@ impl BeaconNodeHttpClient { } /// `GET lighthouse/health` - pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<BeaconHealth>, Error> { let mut path = self.server.clone(); path.path_segments_mut() diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index f3201b5eeed..033f3eb4b98 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -10,7 +10,7 @@ use ring::digest::{digest, SHA256}; use secp256k1::{Message, PublicKey, Signature}; use serde::{de::DeserializeOwned, Serialize}; -use crate::lighthouse::Health; +pub use super::types::ValidatorHealth; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; @@ -199,7 +199,7 @@ impl ValidatorClientHttpClient { } /// `GET lighthouse/health` - pub async fn 
get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<ValidatorHealth>, Error> { let mut path = self.server.clone(); path.path_segments_mut() diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 64674e6fc5f..8c75aa349f3 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -2,7 +2,7 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -pub use crate::lighthouse::Health; +pub use crate::lighthouse::CommonHealth as ValidatorHealth; pub use crate::types::{GenericResponse, VersionData}; pub use types::*; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 6dde635c945..61e447a088c 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -172,7 +172,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .and(signer.clone()) .and_then(|signer| { blocking_signed_json_task(signer, move || { - eth2::lighthouse::Health::observe() + api_types::ValidatorHealth::observe() .map(api_types::GenericResponse::from) .map_err(warp_utils::reject::custom_bad_request) }) From 62fb027e9555c053bc76cc6b5704e433ccaf9d7d Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 3 Nov 2020 11:32:35 +1100 Subject: [PATCH 28/33] Move health into its own crate --- Cargo.lock | 13 +- Cargo.toml | 1 + common/eth2/Cargo.toml | 9 +- common/eth2/src/lighthouse.rs | 267 +------------------------ common/eth2/src/lighthouse_vc/types.rs | 2 +- common/lighthouse_health/Cargo.toml | 17 ++ common/lighthouse_health/src/lib.rs | 267 +++++++++++++++++++++++++ 7 files changed, 301 insertions(+), 275 deletions(-) create mode 100644 common/lighthouse_health/Cargo.toml create mode 100644 common/lighthouse_health/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 
fff2539e2dd..23cb75c87cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1655,8 +1655,8 @@ dependencies = [ "eth2_ssz", "hex", "libsecp256k1", + "lighthouse_health", "proto_array", - "psutil", "reqwest", "ring", "serde", @@ -3343,6 +3343,17 @@ dependencies = [ "validator_dir", ] +[[package]] +name = "lighthouse_health" +version = "0.1.0" +dependencies = [ + "psutil", + "serde", + "serde_json", + "sysinfo", + "systemstat", +] + [[package]] name = "lighthouse_metrics" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index d15b23be682..355a66ec654 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ members = [ "common/eth2_testnet_config", "common/eth2_wallet_manager", "common/hashset_delay", + "common/lighthouse_health", "common/lighthouse_metrics", "common/lighthouse_version", "common/logging", diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 1c414e8c3a3..9aac93a8642 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -24,13 +24,8 @@ account_utils = { path = "../../common/account_utils" } eth2_ssz = { path = "../../consensus/ssz" } sysinfo = "0.15.2" systemstat = "0.1.5" - -[target.'cfg(target_os = "linux")'.dependencies] -psutil = { version = "3.1.0", optional = true } - -[target.'cfg(target_os = "macos")'.dependencies] -psutil = "3.1.0" +lighthouse_health = { path = "../../common/lighthouse_health", optional = true } [features] default = ["lighthouse"] -lighthouse = ["proto_array", "psutil"] +lighthouse = ["proto_array", "lighthouse_health"] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index c005100cd73..6b62e09fc54 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -9,18 +9,9 @@ use proto_array::core::ProtoArray; use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; use ssz::Decode; -use std::path::{Path, PathBuf}; -use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; -use systemstat::{Platform, System as SystemStat}; pub use 
eth2_libp2p::{types::SyncState, PeerInfo}; - -/// The two paths to the two core Lighthouse databases. -#[derive(Debug, Clone, PartialEq)] -pub struct DBPaths { - pub chain_db: PathBuf, - pub freezer_db: PathBuf, -} +pub use lighthouse_health::{BeaconHealth, DBPaths}; /// Information returned by `peers` and `connected_peers`. // TODO: this should be deserializable.. @@ -84,262 +75,6 @@ pub struct ValidatorInclusionData { pub is_previous_epoch_head_attester: bool, } -#[cfg(target_os = "macos")] -use psutil::process::Process; -#[cfg(target_os = "linux")] -use psutil::process::Process; - -/// Contains information about a file system mount. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct MountInfo { - avail: u64, - total: u64, - used: u64, - used_pct: f64, - mounted_on: PathBuf, -} - -impl MountInfo { - /// Attempts to find the `MountInfo` for the given `path`. - pub fn for_path<P: AsRef<Path>>(path: P) -> Result<Option<Self>, String> { - let system = SystemStat::new(); - let mounts = system - .mounts() - .map_err(|e| format!("Unable to enumerate mounts: {:?}", e))?; - - let mut mounts = mounts - .iter() - .filter_map(|drive| { - let mount_path = Path::new(&drive.fs_mounted_on); - let num_components = mount_path.iter().count(); - - Some((drive, mount_path, num_components)) - .filter(|_| path.as_ref().starts_with(&mount_path)) - }) - .collect::<Vec<_>>(); - - // Sort the list of mount points, such that the path with the most components is first. - // - // For example: - // - // ``` - // let mounts = ["/home/paul", "/home", "/"]; - // ``` - // - // The intention here is to find the "closest" mount-point to `path`, such that - // `/home/paul/file` matches `/home/paul`, not `/` or `/home`. 
- mounts.sort_unstable_by(|(_, _, a), (_, _, b)| b.cmp(a)); - - let disk_usage = mounts.first().map(|(drive, mount_path, _)| { - let avail = drive.avail.as_u64(); - let total = drive.total.as_u64(); - let used = total.saturating_sub(avail); - let mut used_pct = if total > 0 { - used as f64 / total as f64 - } else { - 0.0 - } * 100.0; - - // Round to two decimals. - used_pct = (used_pct * 100.00).round() / 100.00; - - Self { - avail, - total, - used, - used_pct, - mounted_on: mount_path.into(), - } - }); - - Ok(disk_usage) - } -} - -/// Reports information about the network on the system the Lighthouse instance is running on. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Network { - /// Network metric for total received bytes across all network interfaces. - pub rx_bytes: u64, - /// Network metric for total received errors across all network interfaces. - pub rx_errors: u64, - /// Network metric for total received packets across all network interfaces. - pub rx_packets: u64, - /// Network metric for total transmitted bytes across all network interfaces. - pub tx_bytes: u64, - /// Network metric for total trasmitted errors across all network interfaces. - pub tx_errors: u64, - /// Network metric for total transmitted packets across all network interfaces. 
- pub tx_packets: u64, -} - -impl Network { - pub fn observe() -> Result<Self, String> { - let mut rx_bytes = 0; - let mut rx_errors = 0; - let mut rx_packets = 0; - let mut tx_bytes = 0; - let mut tx_errors = 0; - let mut tx_packets = 0; - - let s = SystemInfo::new_all(); - s.get_networks().iter().for_each(|(_, network)| { - rx_bytes += network.get_total_received(); - rx_errors += network.get_total_transmitted(); - rx_packets += network.get_total_packets_received(); - tx_bytes += network.get_total_packets_transmitted(); - tx_errors += network.get_total_errors_on_received(); - tx_packets += network.get_total_errors_on_transmitted(); - }); - - Ok(Network { - rx_bytes, - rx_errors, - rx_packets, - tx_bytes, - tx_errors, - tx_packets, - }) - } -} - -/// Reports on the health of the Lighthouse instance. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct CommonHealth { - /// The pid of this process. - pub pid: u32, - /// The total resident memory used by this pid. - pub pid_mem_resident_set_size: u64, - /// The total virtual memory used by this pid. - pub pid_mem_virtual_memory_size: u64, - /// Total virtual memory on the system - pub sys_virt_mem_total: u64, - /// Total virtual memory available for new processes. - pub sys_virt_mem_available: u64, - /// Total virtual memory used on the system - pub sys_virt_mem_used: u64, - /// Total virtual memory not used on the system - pub sys_virt_mem_free: u64, - /// Percentage of virtual memory used on the system - pub sys_virt_mem_percent: f32, - /// System load average over 1 minute. - pub sys_loadavg_1: f64, - /// System load average over 5 minutes. - pub sys_loadavg_5: f64, - /// System load average over 15 minutes. 
- pub sys_loadavg_15: f64, -} - -impl CommonHealth { - #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] - pub fn observe() -> Result<Self, String> { - Err("Health is only available on Linux and MacOS".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result<Self, String> { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - - let loadavg = - psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - Ok(Self { - pid: process.pid(), - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one, - sys_loadavg_5: loadavg.five, - sys_loadavg_15: loadavg.fifteen, - }) - } - - #[cfg(target_os = "macos")] - pub fn observe() -> Result<Self, String> { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - - let sys = SystemStat::new(); - - let loadavg = sys - .load_average() - .map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - Ok(Self { - pid: process.pid() as u32, - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - 
sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one as f64, - sys_loadavg_5: loadavg.five as f64, - sys_loadavg_15: loadavg.fifteen as f64, - }) - } -} - -/// Reports on the health of the Lighthouse instance. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BeaconHealth { - #[serde(flatten)] - pub common: CommonHealth, - /// Network statistics, totals across all network interfaces. - pub network: Network, - /// Filesystem information. - pub chain_database: Option<MountInfo>, - /// Filesystem information. - pub freezer_database: Option<MountInfo>, -} - -impl BeaconHealth { - #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] - pub fn observe() -> Result<Self, String> { - Err("Health is only available on Linux and MacOS".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { - Ok(Self { - common: CommonHealth::observe()?, - network: Network::observe()?, - chain_database: MountInfo::for_path(&db_paths.chain_db)?, - freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, - }) - } - - #[cfg(target_os = "macos")] - pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { - Ok(Self { - common: CommonHealth::observe()?, - network: Network::observe()?, - chain_database: MountInfo::for_path(&db_paths.chain_db)?, - freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, - }) - } -} - impl BeaconNodeHttpClient { /// Perform a HTTP GET request, returning `None` on a 404 error. 
async fn get_bytes_opt<U: IntoUrl>(&self, url: U) -> Result<Option<Vec<u8>>, Error> { diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 8c75aa349f3..18d176834ca 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -2,8 +2,8 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -pub use crate::lighthouse::CommonHealth as ValidatorHealth; pub use crate::types::{GenericResponse, VersionData}; +pub use lighthouse_health::CommonHealth as ValidatorHealth; pub use types::*; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] diff --git a/common/lighthouse_health/Cargo.toml b/common/lighthouse_health/Cargo.toml new file mode 100644 index 00000000000..45a64681d72 --- /dev/null +++ b/common/lighthouse_health/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "lighthouse_health" +version = "0.1.0" +authors = ["Paul Hauner <paul@paulhauner.com>"] +edition = "2018" + +[dependencies] +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" +sysinfo = "0.15.2" +systemstat = "0.1.5" + +[target.'cfg(target_os = "linux")'.dependencies] +psutil = { version = "3.1.0" } + +[target.'cfg(target_os = "macos")'.dependencies] +psutil = "3.1.0" diff --git a/common/lighthouse_health/src/lib.rs b/common/lighthouse_health/src/lib.rs new file mode 100644 index 00000000000..36ee52eb91e --- /dev/null +++ b/common/lighthouse_health/src/lib.rs @@ -0,0 +1,267 @@ +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; +use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; +use systemstat::{Platform, System as SystemStat}; + +#[cfg(target_os = "macos")] +use psutil::process::Process; +#[cfg(target_os = "linux")] +use psutil::process::Process; + +/// The two paths to the two core Lighthouse databases. 
+#[derive(Debug, Clone, PartialEq)] +pub struct DBPaths { + pub chain_db: PathBuf, + pub freezer_db: PathBuf, +} + +/// Contains information about a file system mount. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct MountInfo { + avail: u64, + total: u64, + used: u64, + used_pct: f64, + mounted_on: PathBuf, +} + +impl MountInfo { + /// Attempts to find the `MountInfo` for the given `path`. + pub fn for_path<P: AsRef<Path>>(path: P) -> Result<Option<Self>, String> { + let system = SystemStat::new(); + let mounts = system + .mounts() + .map_err(|e| format!("Unable to enumerate mounts: {:?}", e))?; + + let mut mounts = mounts + .iter() + .filter_map(|drive| { + let mount_path = Path::new(&drive.fs_mounted_on); + let num_components = mount_path.iter().count(); + + Some((drive, mount_path, num_components)) + .filter(|_| path.as_ref().starts_with(&mount_path)) + }) + .collect::<Vec<_>>(); + + // Sort the list of mount points, such that the path with the most components is first. + // + // For example: + // + // ``` + // let mounts = ["/home/paul", "/home", "/"]; + // ``` + // + // The intention here is to find the "closest" mount-point to `path`, such that + // `/home/paul/file` matches `/home/paul`, not `/` or `/home`. + mounts.sort_unstable_by(|(_, _, a), (_, _, b)| b.cmp(a)); + + let disk_usage = mounts.first().map(|(drive, mount_path, _)| { + let avail = drive.avail.as_u64(); + let total = drive.total.as_u64(); + let used = total.saturating_sub(avail); + let mut used_pct = if total > 0 { + used as f64 / total as f64 + } else { + 0.0 + } * 100.0; + + // Round to two decimals. + used_pct = (used_pct * 100.00).round() / 100.00; + + Self { + avail, + total, + used, + used_pct, + mounted_on: mount_path.into(), + } + }); + + Ok(disk_usage) + } +} + +/// Reports information about the network on the system the Lighthouse instance is running on. 
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Network { + /// Network metric for total received bytes across all network interfaces. + pub rx_bytes: u64, + /// Network metric for total received errors across all network interfaces. + pub rx_errors: u64, + /// Network metric for total received packets across all network interfaces. + pub rx_packets: u64, + /// Network metric for total transmitted bytes across all network interfaces. + pub tx_bytes: u64, + /// Network metric for total transmitted errors across all network interfaces. + pub tx_errors: u64, + /// Network metric for total transmitted packets across all network interfaces. + pub tx_packets: u64, +} + +impl Network { + pub fn observe() -> Result<Self, String> { + let mut rx_bytes = 0; + let mut rx_errors = 0; + let mut rx_packets = 0; + let mut tx_bytes = 0; + let mut tx_errors = 0; + let mut tx_packets = 0; + + let s = SystemInfo::new_all(); + s.get_networks().iter().for_each(|(_, network)| { + rx_bytes += network.get_total_received(); + rx_errors += network.get_total_transmitted(); + rx_packets += network.get_total_packets_received(); + tx_bytes += network.get_total_packets_transmitted(); + tx_errors += network.get_total_errors_on_received(); + tx_packets += network.get_total_errors_on_transmitted(); + }); + + Ok(Network { + rx_bytes, + rx_errors, + rx_packets, + tx_bytes, + tx_errors, + tx_packets, + }) + } +} + +/// Reports on the health of the Lighthouse instance. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct CommonHealth { + /// The pid of this process. + pub pid: u32, + /// The total resident memory used by this pid. + pub pid_mem_resident_set_size: u64, + /// The total virtual memory used by this pid. + pub pid_mem_virtual_memory_size: u64, + /// Total virtual memory on the system + pub sys_virt_mem_total: u64, + /// Total virtual memory available for new processes.
+ pub sys_virt_mem_available: u64, + /// Total virtual memory used on the system + pub sys_virt_mem_used: u64, + /// Total virtual memory not used on the system + pub sys_virt_mem_free: u64, + /// Percentage of virtual memory used on the system + pub sys_virt_mem_percent: f32, + /// System load average over 1 minute. + pub sys_loadavg_1: f64, + /// System load average over 5 minutes. + pub sys_loadavg_5: f64, + /// System load average over 15 minutes. + pub sys_loadavg_15: f64, +} + +impl CommonHealth { + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub fn observe() -> Result<Self, String> { + Err("Health is only available on Linux and MacOS".into()) + } + + #[cfg(target_os = "linux")] + pub fn observe() -> Result<Self, String> { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + + let loadavg = + psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + Ok(Self { + pid: process.pid(), + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + }) + } + + #[cfg(target_os = "macos")] + pub fn observe() -> Result<Self, String> { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get 
virtual memory: {:?}", e))?; + + let sys = SystemStat::new(); + + let loadavg = sys + .load_average() + .map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + Ok(Self { + pid: process.pid() as u32, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one as f64, + sys_loadavg_5: loadavg.five as f64, + sys_loadavg_15: loadavg.fifteen as f64, + }) + } +} + +/// Reports on the health of the Lighthouse instance. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BeaconHealth { + #[serde(flatten)] + pub common: CommonHealth, + /// Network statistics, totals across all network interfaces. + pub network: Network, + /// Filesystem information. + pub chain_database: Option<MountInfo>, + /// Filesystem information. + pub freezer_database: Option<MountInfo>, +} + +impl BeaconHealth { + #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] + pub fn observe() -> Result<Self, String> { + Err("Health is only available on Linux and MacOS".into()) + } + + #[cfg(target_os = "linux")] + pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + Ok(Self { + common: CommonHealth::observe()?, + network: Network::observe()?, + chain_database: MountInfo::for_path(&db_paths.chain_db)?, + freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, + }) + } + + #[cfg(target_os = "macos")] + pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + Ok(Self { + common: CommonHealth::observe()?, + network: Network::observe()?, + chain_database: MountInfo::for_path(&db_paths.chain_db)?, + freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, + }) + } +} From 0c5ff905ee3cfced267ef844ba9d2cfc6d8c72bd Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 3 Nov 2020 18:05:42 
+1100 Subject: [PATCH 29/33] Add status --- common/lighthouse_health/src/lib.rs | 102 ++++++++++++++++++++++++---- 1 file changed, 88 insertions(+), 14 deletions(-) diff --git a/common/lighthouse_health/src/lib.rs b/common/lighthouse_health/src/lib.rs index 36ee52eb91e..5b3b12e6050 100644 --- a/common/lighthouse_health/src/lib.rs +++ b/common/lighthouse_health/src/lib.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; use std::path::{Path, PathBuf}; use sysinfo::{NetworkExt, NetworksExt, System as SystemInfo, SystemExt}; use systemstat::{Platform, System as SystemStat}; @@ -8,6 +9,40 @@ use psutil::process::Process; #[cfg(target_os = "linux")] use psutil::process::Process; +const GB: u64 = 1_000_000_000; +const CHAIN_DB_REQ_SIZE: u64 = 100 * GB; +const FREEZER_DB_REQ_SIZE: u64 = 20 * GB; +const TOTAL_REQ_SIZE: u64 = CHAIN_DB_REQ_SIZE + FREEZER_DB_REQ_SIZE; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Status { + status: String, + message: String, +} + +impl Status { + pub fn error(message: String) -> Self { + Self { + status: "error".to_string(), + message, + } + } + + pub fn warning(message: String) -> Self { + Self { + status: "warning".to_string(), + message, + } + } + + pub fn ok(message: String) -> Self { + Self { + status: "ok".to_string(), + message, + } + } +} + /// The two paths to the two core Lighthouse databases. #[derive(Debug, Clone, PartialEq)] pub struct DBPaths { @@ -233,6 +268,8 @@ pub struct BeaconHealth { pub common: CommonHealth, /// Network statistics, totals across all network interfaces. pub network: Network, + /// The combined status for the chain and freezer databases. + pub database_status: Option<Status>, /// Filesystem information. pub chain_database: Option<MountInfo>, /// Filesystem information. 
@@ -240,28 +277,65 @@ pub struct BeaconHealth { } impl BeaconHealth { - #[cfg(all(not(target_os = "linux"), not(target_os = "macos")))] - pub fn observe() -> Result<Self, String> { - Err("Health is only available on Linux and MacOS".into()) - } - - #[cfg(target_os = "linux")] pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + let chain_database = MountInfo::for_path(&db_paths.chain_db)?; + let freezer_database = MountInfo::for_path(&db_paths.freezer_db)?; + + let database_status = chain_database + .as_ref() + .and_then(|chain| Some((chain, freezer_database.as_ref()?))) + .map(|(chain, freezer)| database_status(chain, freezer)); + Ok(Self { common: CommonHealth::observe()?, network: Network::observe()?, + database_status, chain_database: MountInfo::for_path(&db_paths.chain_db)?, freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, }) } +} - #[cfg(target_os = "macos")] - pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { - Ok(Self { - common: CommonHealth::observe()?, - network: Network::observe()?, - chain_database: MountInfo::for_path(&db_paths.chain_db)?, - freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, - }) +fn database_status(chain: &MountInfo, freezer: &MountInfo) -> Status { + if chain.mounted_on == freezer.mounted_on { + status_for_disk(&chain.mounted_on, chain.avail, TOTAL_REQ_SIZE) + } else { + match ( + chain.avail.cmp(&CHAIN_DB_REQ_SIZE), + freezer.avail.cmp(&FREEZER_DB_REQ_SIZE), + ) { + (Ordering::Less, Ordering::Less) => Status::error(format!( + "Insufficient size for {} and {}; {} and {} additional GB recommended, + respectively.", + chain.mounted_on.to_string_lossy(), + freezer.mounted_on.to_string_lossy(), + CHAIN_DB_REQ_SIZE - chain.avail, + FREEZER_DB_REQ_SIZE - freezer.avail + )), + (Ordering::Less, _) => { + status_for_disk(&chain.mounted_on, chain.avail, CHAIN_DB_REQ_SIZE) + } + (_, Ordering::Less) => { + status_for_disk(&freezer.mounted_on, freezer.avail, FREEZER_DB_REQ_SIZE) + } + _ => 
Status::ok(format!( + "{} and {} exceed the recommended capacity.", + chain.mounted_on.to_string_lossy(), + freezer.mounted_on.to_string_lossy() + )), + } + } +} + +fn status_for_disk(mount: &PathBuf, avail: u64, req: u64) -> Status { + if req > avail { + Status::error(format!( + "Insufficient size for {}; {} GB recommended but {} GB available.", + mount.to_string_lossy(), + req / GB, + avail / GB + )) + } else { + Status::ok(format!("{:?} has sufficient capacity.", mount)) } } From f0f21a1747ac26c9bc8bfb9127eb536e2b978a99 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Tue, 3 Nov 2020 19:10:06 +1100 Subject: [PATCH 30/33] Add CPU, memory guages --- Cargo.lock | 1 + common/lighthouse_health/Cargo.toml | 1 + common/lighthouse_health/src/lib.rs | 147 +++++++++++++++++++++++----- 3 files changed, 125 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23cb75c87cc..9e5757900db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3347,6 +3347,7 @@ dependencies = [ name = "lighthouse_health" version = "0.1.0" dependencies = [ + "num_cpus", "psutil", "serde", "serde_json", diff --git a/common/lighthouse_health/Cargo.toml b/common/lighthouse_health/Cargo.toml index 45a64681d72..6bbce0c0d95 100644 --- a/common/lighthouse_health/Cargo.toml +++ b/common/lighthouse_health/Cargo.toml @@ -9,6 +9,7 @@ serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" sysinfo = "0.15.2" systemstat = "0.1.5" +num_cpus = "1.13.0" [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.1.0" } diff --git a/common/lighthouse_health/src/lib.rs b/common/lighthouse_health/src/lib.rs index 5b3b12e6050..ee540d0dbfe 100644 --- a/common/lighthouse_health/src/lib.rs +++ b/common/lighthouse_health/src/lib.rs @@ -9,17 +9,29 @@ use psutil::process::Process; #[cfg(target_os = "linux")] use psutil::process::Process; +const MB: u64 = 1_000_000; const GB: u64 = 1_000_000_000; +const MIN_SAFE_DB_SIZE: u64 = 1 * GB; const 
CHAIN_DB_REQ_SIZE: u64 = 100 * GB; const FREEZER_DB_REQ_SIZE: u64 = 20 * GB; const TOTAL_REQ_SIZE: u64 = CHAIN_DB_REQ_SIZE + FREEZER_DB_REQ_SIZE; +const LOAD_AVG_PCT_WARN: f64 = 85.0; +const LOAD_AVG_PCT_ERROR: f64 = 100.0; + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Status { status: String, message: String, } +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct StatusGauge { + status: String, + message: String, + gauge_pct: f64, +} + impl Status { pub fn error(message: String) -> Self { Self { @@ -28,9 +40,9 @@ impl Status { } } - pub fn warning(message: String) -> Self { + pub fn warn(message: String) -> Self { Self { - status: "warning".to_string(), + status: "warn".to_string(), message, } } @@ -41,6 +53,14 @@ impl Status { message, } } + + pub fn gauge(self, gauge_pct: f64) -> StatusGauge { + StatusGauge { + status: self.status, + message: self.message, + gauge_pct, + } + } } /// The two paths to the two core Lighthouse databases. @@ -95,20 +115,17 @@ impl MountInfo { let avail = drive.avail.as_u64(); let total = drive.total.as_u64(); let used = total.saturating_sub(avail); - let mut used_pct = if total > 0 { + let used_pct = if total > 0 { used as f64 / total as f64 } else { 0.0 } * 100.0; - // Round to two decimals. - used_pct = (used_pct * 100.00).round() / 100.00; - Self { avail, total, used, - used_pct, + used_pct: round(used_pct, 2), mounted_on: mount_path.into(), } }); @@ -264,12 +281,16 @@ impl CommonHealth { /// Reports on the health of the Lighthouse instance. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BeaconHealth { + /// A rough status of the CPU usage. + pub cpu_status: StatusGauge, + /// RAM usage. + pub memory_status: StatusGauge, + /// The combined status for the chain and freezer databases. + pub database_status: Option<StatusGauge>, #[serde(flatten)] pub common: CommonHealth, /// Network statistics, totals across all network interfaces. 
pub network: Network, - /// The combined status for the chain and freezer databases. - pub database_status: Option<Status>, /// Filesystem information. pub chain_database: Option<MountInfo>, /// Filesystem information. @@ -278,6 +299,10 @@ pub struct BeaconHealth { impl BeaconHealth { pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + let common = CommonHealth::observe()?; + let cpu_status = cpu_status(&common); + let memory_status = memory_status(&common); + let chain_database = MountInfo::for_path(&db_paths.chain_db)?; let freezer_database = MountInfo::for_path(&db_paths.freezer_db)?; @@ -287,55 +312,129 @@ impl BeaconHealth { .map(|(chain, freezer)| database_status(chain, freezer)); Ok(Self { + cpu_status, + memory_status, + database_status, common: CommonHealth::observe()?, network: Network::observe()?, - database_status, chain_database: MountInfo::for_path(&db_paths.chain_db)?, freezer_database: MountInfo::for_path(&db_paths.freezer_db)?, }) } } -fn database_status(chain: &MountInfo, freezer: &MountInfo) -> Status { +fn cpu_status(health: &CommonHealth) -> StatusGauge { + // Disallow 0 CPUs to avoid a divide-by-zero. + // + // Note: we're using one library to detect loadavg and another to detect CPU count. I can + // imagine this might cause issues on some platforms, but I don't know how to resolve it. 
+ let num_cpus = std::cmp::max(1, num_cpus::get()) as f64; + let pct = round(health.sys_loadavg_5 as f64 / num_cpus, 2); + + if pct > LOAD_AVG_PCT_ERROR { + Status::error("CPU is overloaded.".to_string()).gauge(pct) + } else if pct > LOAD_AVG_PCT_WARN { + Status::warn("CPU has high load.".to_string()).gauge(pct) + } else { + Status::ok(format!("CPU below {:0}%", LOAD_AVG_PCT_WARN)).gauge(pct) + } +} + +const MEMORY_AVAILABLE_ERROR: u64 = 512 * MB; +const MEMORY_AVAILABLE_WARN: u64 = 1 * GB; +const MEMORY_RECOMMENDED_TOTAL: u64 = 8 * GB; + +fn memory_status(health: &CommonHealth) -> StatusGauge { + let avail = health.sys_virt_mem_available; + let total = health.sys_virt_mem_total; + + let status = if avail < MEMORY_AVAILABLE_ERROR { + Status::error(format!( + "Available system memory critically low: {} MB.", + avail / MB + )) + } else if avail < MEMORY_AVAILABLE_WARN { + Status::warn(format!( + "Available system memory is low: {} GB.", + avail / GB + )) + } else if total < MEMORY_RECOMMENDED_TOTAL { + Status::warn(format!( + "Total system memory {} GB is less than the recommended {} GB.", + total / GB, + MEMORY_RECOMMENDED_TOTAL / GB + )) + } else { + Status::ok(format!("{} GB available memory", avail / GB)) + }; + + status.gauge(round(health.sys_virt_mem_percent as f64, 2)) +} + +fn database_status(chain: &MountInfo, freezer: &MountInfo) -> StatusGauge { if chain.mounted_on == freezer.mounted_on { - status_for_disk(&chain.mounted_on, chain.avail, TOTAL_REQ_SIZE) + status_for_disk(&chain.mounted_on, chain.avail, TOTAL_REQ_SIZE).gauge(chain.used_pct) } else { match ( chain.avail.cmp(&CHAIN_DB_REQ_SIZE), freezer.avail.cmp(&FREEZER_DB_REQ_SIZE), ) { - (Ordering::Less, Ordering::Less) => Status::error(format!( - "Insufficient size for {} and {}; {} and {} additional GB recommended, + (Ordering::Less, Ordering::Less) => { + // Indicate using the lowest percentage. 
+ let pct = if chain.used_pct > freezer.used_pct { + freezer.used_pct + } else { + chain.used_pct + }; + + Status::error(format!( + "Insufficient size for {} and {}; {} and {} additional GB recommended, respectively.", - chain.mounted_on.to_string_lossy(), - freezer.mounted_on.to_string_lossy(), - CHAIN_DB_REQ_SIZE - chain.avail, - FREEZER_DB_REQ_SIZE - freezer.avail - )), + chain.mounted_on.to_string_lossy(), + freezer.mounted_on.to_string_lossy(), + CHAIN_DB_REQ_SIZE - chain.avail, + FREEZER_DB_REQ_SIZE - freezer.avail + )) + .gauge(pct) + } (Ordering::Less, _) => { status_for_disk(&chain.mounted_on, chain.avail, CHAIN_DB_REQ_SIZE) + .gauge(chain.used_pct) } (_, Ordering::Less) => { status_for_disk(&freezer.mounted_on, freezer.avail, FREEZER_DB_REQ_SIZE) + .gauge(freezer.used_pct) } _ => Status::ok(format!( "{} and {} exceed the recommended capacity.", chain.mounted_on.to_string_lossy(), freezer.mounted_on.to_string_lossy() - )), + )) + .gauge(100.0), } } } -fn status_for_disk(mount: &PathBuf, avail: u64, req: u64) -> Status { - if req > avail { +fn status_for_disk(mount: &PathBuf, avail: u64, recommended: u64) -> Status { + if avail < MIN_SAFE_DB_SIZE { Status::error(format!( - "Insufficient size for {}; {} GB recommended but {} GB available.", + "Critically low disk space on {}; {} MB available.", mount.to_string_lossy(), - req / GB, + avail / MB + )) + } else if recommended > avail { + Status::warn(format!( + "Low disk space on {}; {} GB recommended but {} GB available.", + mount.to_string_lossy(), + recommended / GB, avail / GB )) } else { Status::ok(format!("{:?} has sufficient capacity.", mount)) } } + +fn round(x: f64, decimals: i32) -> f64 { + let precision = 10.0_f64.powi(decimals); + (x * precision).round() / precision +} From 6374c88bbe59b277217f2b9959f9ad6fdbb1ffd3 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 4 Nov 2020 11:26:12 +1100 Subject: [PATCH 31/33] Start threading peer count, fix bug --- 
beacon_node/http_api/src/lib.rs | 22 +++++++++++++++------- beacon_node/http_metrics/src/metrics.rs | 2 +- common/lighthouse_health/src/lib.rs | 4 ++-- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5935963dbbf..331375425e9 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -276,6 +276,10 @@ pub fn serve<T: BeaconChainTypes>( } }); + // Create a `warp` filter that provides optional access to the network globals. + let inner_network_globals = ctx.network_globals.clone(); + let network_globals_opt = warp::any().map(move || inner_network_globals.clone()); + // Create a `warp` filter that provides access to the beacon chain. let inner_ctx = ctx.clone(); let chain_filter = @@ -1720,13 +1724,17 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path("health")) .and(warp::path::end()) .and(db_paths_filter) - .and_then(|db_paths| { - blocking_json_task(move || { - eth2::lighthouse::BeaconHealth::observe(&db_paths) - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::custom_bad_request) - }) - }); + .and(network_globals_opt) + .and_then( + |db_paths, network_globals_opt: Option<Arc<NetworkGlobals<T::EthSpec>>>| { + blocking_json_task(move || { + let connected_peers = network_globals_opt.as_ref().map(|g| g.connected_peers()); + eth2::lighthouse::BeaconHealth::observe(&db_paths, connected_peers) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::custom_bad_request) + }) + }, + ); // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 4ef1765c468..18d830ff99c 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -92,7 +92,7 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( // This will silently fail if we are unable to observe the health. 
This is desired behaviour // since we don't support `BeaconHealth` for all platforms. - if let Ok(health) = BeaconHealth::observe(db_paths) { + if let Ok(health) = BeaconHealth::observe(db_paths, None) { set_gauge( &PROCESS_RES_MEM, health.common.pid_mem_resident_set_size as i64, diff --git a/common/lighthouse_health/src/lib.rs b/common/lighthouse_health/src/lib.rs index ee540d0dbfe..26dc45953bc 100644 --- a/common/lighthouse_health/src/lib.rs +++ b/common/lighthouse_health/src/lib.rs @@ -298,7 +298,7 @@ pub struct BeaconHealth { } impl BeaconHealth { - pub fn observe(db_paths: &DBPaths) -> Result<Self, String> { + pub fn observe(db_paths: &DBPaths, peer_count_opt: Option<usize>) -> Result<Self, String> { let common = CommonHealth::observe()?; let cpu_status = cpu_status(&common); let memory_status = memory_status(&common); @@ -365,7 +365,7 @@ fn memory_status(health: &CommonHealth) -> StatusGauge { MEMORY_RECOMMENDED_TOTAL / GB )) } else { - Status::ok(format!("{} GB available memory", avail / GB)) + Status::ok(format!("{} GB available memory", avail)) }; status.gauge(round(health.sys_virt_mem_percent as f64, 2)) From c606666640f2cab9827d96d3ebd0d0de8a5a62a1 Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 4 Nov 2020 12:56:34 +1100 Subject: [PATCH 32/33] Add eth1 and p2p status --- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 38 ++++++++++++- beacon_node/http_metrics/src/metrics.rs | 2 +- common/lighthouse_health/src/lib.rs | 73 ++++++++++++++++++++++++- 5 files changed, 110 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96de5bb4455..25fd068c344 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2567,6 +2567,7 @@ dependencies = [ "fork_choice", "hex", "lazy_static", + "lighthouse_health", "lighthouse_metrics", "lighthouse_version", "network", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index acaea73999c..a47689d0521 100644 --- 
a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -26,6 +26,7 @@ warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } eth2_ssz = { path = "../../consensus/ssz" } bs58 = "0.3.1" +lighthouse_health = { path = "../../common/lighthouse_health" } [dev-dependencies] store = { path = "../store" } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 51d812a3228..10b756280e0 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -295,6 +295,10 @@ pub fn serve<T: BeaconChainTypes>( } }); + // Create a `warp` filter that provides optional access to the beacon chain. + let inner_ctx = ctx.clone(); + let chain_opt_filter = warp::any().map(move || inner_ctx.chain.clone()); + // Create a `warp` filter that provides access to the network sender channel. let inner_ctx = ctx.clone(); let network_tx_filter = warp::any() @@ -1739,11 +1743,41 @@ pub fn serve<T: BeaconChainTypes>( .and(warp::path::end()) .and(db_paths_filter) .and(network_globals_opt) + .and(chain_opt_filter) .and_then( - |db_paths, network_globals_opt: Option<Arc<NetworkGlobals<T::EthSpec>>>| { + |db_paths, + network_globals_opt: Option<Arc<NetworkGlobals<T::EthSpec>>>, + chain: Option<Arc<BeaconChain<T>>>| { blocking_json_task(move || { + let sync_status = chain + .as_ref() + .and_then(|chain| chain.eth1_chain.as_ref().map(|eth1| (chain, eth1))) + .map(|(chain, eth1)| { + let head_info = chain + .head_info() + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + + eth1.sync_status(head_info.genesis_time, current_slot, &chain.spec) + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to determine Eth1 sync status".to_string(), + ) + }) + }) + .transpose()? 
+ .map(|sync_status| lighthouse_health::Eth1SyncInfo { + eth1_node_sync_status_percentage: sync_status + .eth1_node_sync_status_percentage, + lighthouse_is_cached_and_ready: sync_status + .lighthouse_is_cached_and_ready, + }); + let connected_peers = network_globals_opt.as_ref().map(|g| g.connected_peers()); - eth2::lighthouse::BeaconHealth::observe(&db_paths, connected_peers) + + eth2::lighthouse::BeaconHealth::observe(&db_paths, connected_peers, sync_status) .map(api_types::GenericResponse::from) .map_err(warp_utils::reject::custom_bad_request) }) diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 18d830ff99c..8b9dbca4fcd 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -92,7 +92,7 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>( // This will silently fail if we are unable to observe the health. This is desired behaviour // since we don't support `BeaconHealth` for all platforms. - if let Ok(health) = BeaconHealth::observe(db_paths, None) { + if let Ok(health) = BeaconHealth::observe(db_paths, None, None) { set_gauge( &PROCESS_RES_MEM, health.common.pid_mem_resident_set_size as i64, diff --git a/common/lighthouse_health/src/lib.rs b/common/lighthouse_health/src/lib.rs index 26dc45953bc..4c8e541a50d 100644 --- a/common/lighthouse_health/src/lib.rs +++ b/common/lighthouse_health/src/lib.rs @@ -19,6 +19,9 @@ const TOTAL_REQ_SIZE: u64 = CHAIN_DB_REQ_SIZE + FREEZER_DB_REQ_SIZE; const LOAD_AVG_PCT_WARN: f64 = 85.0; const LOAD_AVG_PCT_ERROR: f64 = 100.0; +const SAFE_PEER_COUNT: usize = 4; +const EXPECTED_PEER_COUNT: usize = 55; // TODO: get this dynamically. 
+ #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Status { status: String, @@ -32,6 +35,11 @@ pub struct StatusGauge { gauge_pct: f64, } +pub struct Eth1SyncInfo { + pub eth1_node_sync_status_percentage: f64, + pub lighthouse_is_cached_and_ready: bool, +} + impl Status { pub fn error(message: String) -> Self { Self { @@ -285,6 +293,10 @@ pub struct BeaconHealth { pub cpu_status: StatusGauge, /// RAM usage. pub memory_status: StatusGauge, + /// Info about the eth1 chain. + pub eth1_status: StatusGauge, + /// Info about the libp2p network. + pub p2p_status: Option<StatusGauge>, /// The combined status for the chain and freezer databases. pub database_status: Option<StatusGauge>, #[serde(flatten)] @@ -298,10 +310,16 @@ pub struct BeaconHealth { } impl BeaconHealth { - pub fn observe(db_paths: &DBPaths, peer_count_opt: Option<usize>) -> Result<Self, String> { + pub fn observe( + db_paths: &DBPaths, + peer_count_opt: Option<usize>, + eth1_opt: Option<Eth1SyncInfo>, + ) -> Result<Self, String> { let common = CommonHealth::observe()?; let cpu_status = cpu_status(&common); let memory_status = memory_status(&common); + let eth1_status = eth1_status(eth1_opt); + let p2p_status = peer_count_opt.map(p2p_status); let chain_database = MountInfo::for_path(&db_paths.chain_db)?; let freezer_database = MountInfo::for_path(&db_paths.freezer_db)?; @@ -314,6 +332,8 @@ impl BeaconHealth { Ok(Self { cpu_status, memory_status, + eth1_status, + p2p_status, database_status, common: CommonHealth::observe()?, network: Network::observe()?, @@ -371,6 +391,52 @@ fn memory_status(health: &CommonHealth) -> StatusGauge { status.gauge(round(health.sys_virt_mem_percent as f64, 2)) } +fn eth1_status(eth1_opt: Option<Eth1SyncInfo>) -> StatusGauge { + if let Some(eth1) = eth1_opt { + let ready = eth1.lighthouse_is_cached_and_ready; + let pct = round(eth1.eth1_node_sync_status_percentage, 2); + + if ready { + if pct == 100.0 { + Status::ok("Eth1 is fully synced.".to_string()) + } 
else { + Status::warn(format!("Eth1 is adequately synced at {}%.", pct)) + } + } else { + if pct == 100.0 { + Status::warn("Eth1 is fully synced but caches are still being built.".to_string()) + } else { + Status::warn(format!( + "Eth1 is not adequately synced. Estimated progress: {}%.", + pct, + )) + } + } + .gauge(pct) + } else { + Status::error( + "Eth1 sync is disabled, use the --eth1 CLI flag to enable. Eth1 is only \ + required for validators." + .to_string(), + ) + .gauge(0.0) + } +} + +fn p2p_status(peer_count: usize) -> StatusGauge { + let peer_count = std::cmp::min(peer_count, EXPECTED_PEER_COUNT); + let pct = round((peer_count as f64 / EXPECTED_PEER_COUNT as f64) * 100.0, 2); + + if peer_count == 0 { + Status::error("No connected peers.".to_string()) + } else if peer_count < SAFE_PEER_COUNT { + Status::warn(format!("Low peer count ({}).", peer_count)) + } else { + Status::warn(format!("Peer count sufficient ({}).", peer_count)) + } + .gauge(pct) +} + fn database_status(chain: &MountInfo, freezer: &MountInfo) -> StatusGauge { if chain.mounted_on == freezer.mounted_on { status_for_disk(&chain.mounted_on, chain.avail, TOTAL_REQ_SIZE).gauge(chain.used_pct) @@ -430,7 +496,10 @@ fn status_for_disk(mount: &PathBuf, avail: u64, recommended: u64) -> Status { avail / GB )) } else { - Status::ok(format!("{:?} has sufficient capacity.", mount)) + Status::ok(format!( + "{} has sufficient capacity.", + mount.to_string_lossy() + )) } } From 4e3c39f1e91c243fdd313199e1f74afc6aed732d Mon Sep 17 00:00:00 2001 From: Paul Hauner <paul@paulhauner.com> Date: Wed, 4 Nov 2020 13:01:02 +1100 Subject: [PATCH 33/33] Fix GB display --- common/lighthouse_health/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/lighthouse_health/src/lib.rs b/common/lighthouse_health/src/lib.rs index 4c8e541a50d..7cafa81ec56 100644 --- a/common/lighthouse_health/src/lib.rs +++ b/common/lighthouse_health/src/lib.rs @@ -385,7 +385,7 @@ fn memory_status(health: 
&CommonHealth) -> StatusGauge { MEMORY_RECOMMENDED_TOTAL / GB )) } else { - Status::ok(format!("{} GB available memory", avail)) + Status::ok(format!("{} GB available memory", avail / GB)) }; status.gauge(round(health.sys_virt_mem_percent as f64, 2))