diff --git a/Cargo.lock b/Cargo.lock index 78ea2cead39..7064a203aa2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -83,31 +83,11 @@ checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "aead" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922b33332f54fc0ad13fa3e514601e8d30fb54e1f3eadc36643f6526db645621" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "aes" -version = "0.5.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" +checksum = "6e3e798aa0c8239776f54415bc06f3d74b1850f3f830b45c35cfc80556973f70" dependencies = [ - "aes-soft", - "aesni", - "block-cipher", + "generic-array", ] [[package]] @@ -120,20 +100,7 @@ dependencies = [ "cipher 0.3.0", "cpufeatures", "ctr", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aes-gcm" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" -dependencies = [ - "aead 0.3.2", - "aes 0.5.0", - "block-cipher", - "ghash 0.3.1", - "subtle 2.4.0", + "opaque-debug", ] [[package]] @@ -142,33 +109,12 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" dependencies = [ - "aead 0.4.1", - "aes 0.7.4", + "aead", + "aes", "cipher 0.3.0", "ctr", - "ghash 0.4.2", - "subtle 2.4.0", -] - -[[package]] -name = "aes-soft" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" -dependencies = [ - "block-cipher", - "byteorder", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aesni" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" -dependencies = [ - "block-cipher", - "opaque-debug 0.3.0", + "ghash", + "subtle", ] [[package]] @@ -211,9 +157,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" +checksum = "595d3cfa7a60d4555cb5067b99f07142a08ea778de5cf993f7b75c7d8fabc486" [[package]] name = "arbitrary" @@ -257,31 +203,12 @@ dependencies = [ "term", ] -[[package]] -name = "asn1_der" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" -dependencies = [ - "asn1_der_derive", -] - [[package]] name = "asn1_der" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" -[[package]] -name = "asn1_der_derive" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "assert-json-diff" version = "2.0.1" @@ -424,6 +351,20 @@ 
dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-std-resolver" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed4e2c3da14d8ad45acb1e3191db7a918e9505b6f155b218e70a7c9a1a48c638" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + "trust-dns-resolver", +] + [[package]] name = "async-task" version = "4.0.3" @@ -711,20 +652,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" dependencies = [ "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", + "digest", + "opaque-debug", ] [[package]] @@ -733,26 +662,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.4", -] - -[[package]] -name = "block-cipher" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", + "block-padding", + "generic-array", ] [[package]] @@ -875,12 +786,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "byteorder" version = "1.4.3" @@ -943,18 +848,18 @@ dependencies = [ [[package]] name = "cast" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57cdfa5d50aad6cb4d44dcab6101a7f79925bd59d82ca42f38a9856a28865374" +checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ - "rustc_version 0.3.3", + "rustc_version 0.4.0", ] [[package]] name = "cc" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" [[package]] name = "cfg-if" @@ -970,24 +875,26 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.5.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" +checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412" dependencies = [ - "stream-cipher", + "cfg-if 1.0.0", + "cipher 0.3.0", + "cpufeatures", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.6.0" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" +checksum = "1580317203210c517b6d44794abfbe600698276db18127e37ad3e69bf5e848e5" dependencies = [ - "aead 0.3.2", + "aead", "chacha20", + "cipher 0.3.0", "poly1305", - "stream-cipher", "zeroize", ] @@ -1010,7 +917,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -1019,7 +926,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -1190,12 +1097,6 @@ dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc32fast" version = "1.2.1" @@ -1291,24 +1192,14 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array 0.12.4", - "subtle 1.0.0", -] - [[package]] name = "crypto-mac" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -1317,8 +1208,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -1327,8 +1218,8 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] [[package]] @@ -1382,6 +1273,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "cuckoofilter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +dependencies = [ + "byteorder", + "fnv", + "rand 0.7.3", +] + [[package]] name = "curl" version = "0.4.38" @@ -1420,9 +1322,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" dependencies = [ "byteorder", - "digest 0.9.0", + "digest", "rand_core 0.5.1", - "subtle 2.4.0", + "subtle", "zeroize", ] @@ -1502,7 +1404,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.9.5", + "sha2", "tree_hash", "types", ] @@ -1541,13 +1443,14 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.14" +version = "0.99.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7b9cef1e351660e5443924e4f43ab25fbbed3e9a5f052df3677deb4d6b320" +checksum = 
"40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ "convert_case", "proc-macro2", "quote", + "rustc_version 0.3.3", "syn", ] @@ -1563,22 +1466,13 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] @@ -1640,27 +1534,27 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" [[package]] name = "discv5" -version = "0.1.0-beta.5" +version = "0.1.0-beta.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f5a5132ff1173d356fd78d09cd33d82fe8f7e6b4016d8c891edf2680a8cebe6" +checksum = "7ea68ad7b3b04274980a33fd1579517540b9d341dfe634b17b6b49fa972cfb86" dependencies = [ - "aes 0.7.4", - "aes-gcm 0.9.2", + "aes", + "aes-gcm", "arrayvec 0.7.1", - "digest 0.9.0", + "digest", "enr", "fnv", "futures", "hex", "hkdf", "lazy_static", - "libp2p-core 0.28.3", + "libp2p-core 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", "lru", "lru_time_cache", "parking_lot", "rand 0.8.4", "rlp 0.5.0", - "sha2 0.9.5", + "sha2", "smallvec", "tokio 1.8.1", "tokio-stream", @@ -1671,6 +1565,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dns-parser" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" +dependencies = [ + "byteorder", + "quick-error", +] + [[package]] name = "dtoa" version = "0.4.8" @@ -1708,7 +1612,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.5", + "sha2", "zeroize", ] @@ -1754,11 +1658,11 @@ checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec 0.20.4", "ff", - "generic-array 0.14.4", + "generic-array", "group", "pkcs8", "rand_core 0.6.3", - "subtle 2.4.0", + "subtle", "zeroize", ] @@ -1800,6 +1704,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "env_logger" version = "0.7.1" @@ -1914,7 +1830,7 @@ dependencies = [ "futures", "futures-util", "hex", - "libsecp256k1 0.5.0", + "libsecp256k1", "procinfo", "proto_array", "psutil", @@ -1945,7 +1861,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2 0.9.5", + "sha2", "wasm-bindgen-test", ] @@ -1972,7 +1888,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.9.5", + "sha2", "zeroize", ] @@ -1980,7 +1896,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes 0.7.4", + "aes", "bls", "eth2_key_derivation", "eth2_ssz", @@ -1992,7 +1908,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2 0.9.5", + "sha2", "tempfile", "unicode-normalization", "uuid", @@ -2027,7 +1943,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 
0.9.5", + "sha2", "slog", "slog-async", "slog-term", @@ -2134,9 +2050,9 @@ dependencies = [ [[package]] name = "ethabi" -version = "14.0.0" +version = "14.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c52991643379afc90bfe2df3c64d53983e59c35a82ba6e75c997cfc2880d8524" +checksum = "a01317735d563b3bad2d5f90d2e1799f414165408251abb762510f40e790e69a" dependencies = [ "anyhow", "ethereum-types 0.11.0", @@ -2217,12 +2133,6 @@ dependencies = [ "futures", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallback" version = "0.1.0" @@ -2259,7 +2169,7 @@ checksum = "72a4d941a5b7c2a75222e2d44fcdf634a67133d9db31e177ae5ff6ecda852bfe" dependencies = [ "bitvec 0.20.4", "rand_core 0.6.3", - "subtle 2.4.0", + "subtle", ] [[package]] @@ -2514,15 +2424,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.4" @@ -2581,24 +2482,14 @@ dependencies = [ "wasi 0.10.2+wasi-snapshot-preview1", ] -[[package]] -name = "ghash" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.4.5", -] - [[package]] name = "ghash" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bbd60caa311237d508927dbba7594b483db3ef05faa55172fcf89b1bcda7853" dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.1", + "opaque-debug", + "polyval", ] [[package]] @@ -2656,7 +2547,7 @@ checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ "ff", "rand_core 0.6.3", - "subtle 2.4.0", + "subtle", ] [[package]] @@ -2778,20 +2669,10 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" dependencies = [ - "digest 0.9.0", + "digest", "hmac 0.11.0", ] -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", -] - [[package]] name = "hmac" version = "0.8.1" @@ -2799,7 +2680,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ "crypto-mac 0.8.0", - "digest 0.9.0", + "digest", ] [[package]] @@ -2809,7 +2690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" dependencies = [ "crypto-mac 0.10.0", - "digest 0.9.0", + "digest", ] [[package]] @@ -2819,29 +2700,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac 0.11.0", - "digest 0.9.0", + "digest", ] [[package]] name = "hmac-drbg" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ - "digest 0.8.1", - "generic-array 0.12.4", - "hmac 0.7.1", + "digest", + "generic-array", + "hmac 0.8.1", ] [[package]] -name = "hmac-drbg" -version = "0.3.0" +name = "hostname" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ - "digest 0.9.0", - "generic-array 0.14.4", - "hmac 0.8.1", + "libc", + "match_cfg", + "winapi", ] [[package]] @@ -2872,7 +2753,6 @@ version = "0.1.0" dependencies = [ "beacon_chain", "bs58", - "discv5", "environment", "eth1", "eth2", @@ -2972,9 +2852,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.9" +version = "0.14.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" +checksum = "7728a72c4c7d72665fde02204bcbd93b247721025b222ef78606f14513e0fd03" dependencies = [ "bytes 1.0.1", "futures-channel", @@ -3047,9 +2927,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "0.1.8" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" +checksum = "ae8ab7f67bad3240049cb24fb9cb0b4c2c6af4c245840917fbbdededeee91179" dependencies = [ "async-io", "futures", @@ -3151,9 +3031,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" dependencies = [ "cfg-if 1.0.0", ] @@ -3176,6 +3056,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi", + "winreg 0.6.2", +] + [[package]] name = "ipnet" version = "2.3.1" @@ -3265,7 +3157,7 @@ dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sha2 0.9.5", + "sha2", ] [[package]] @@ -3391,9 +3283,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" [[package]] name = "libc" -version = "0.2.97" +version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" [[package]] name = "libflate" @@ -3433,25 +3325,36 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.35.1" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.39.1" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "atomic", "bytes 1.0.1", "futures", "lazy_static", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-deflate", "libp2p-dns", + 
"libp2p-floodsub", "libp2p-gossipsub", "libp2p-identify", + "libp2p-kad", + "libp2p-mdns", "libp2p-mplex", "libp2p-noise", + "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-relay", + "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", "libp2p-tcp", + "libp2p-uds", + "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "parity-multiaddr 0.11.1", + "multiaddr", "parking_lot", "pin-project 1.0.7", "smallvec", @@ -3460,10 +3363,11 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.27.1" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" dependencies = [ - "asn1_der 0.6.3", + "asn1_der", "bs58", "ed25519-dalek", "either", @@ -3471,11 +3375,11 @@ dependencies = [ "futures", "futures-timer", "lazy_static", - "libsecp256k1 0.3.5", + "libsecp256k1", "log", + "multiaddr", "multihash", - "multistream-select 0.10.1", - "parity-multiaddr 0.11.1", + "multistream-select 0.10.2", "parking_lot", "pin-project 1.0.7", "prost", @@ -3483,7 +3387,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.5", + "sha2", "smallvec", "thiserror", "unsigned-varint 0.7.0", @@ -3493,11 +3397,10 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.28.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "554d3e7e9e65f939d66b75fd6a4c67f258fe250da61b91f46c545fc4a89b51d9" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ - "asn1_der 0.7.4", + "asn1_der", "bs58", "ed25519-dalek", "either", @@ -3505,11 +3408,11 @@ dependencies = [ "futures", "futures-timer", "lazy_static", - "libsecp256k1 0.3.5", + "libsecp256k1", "log", + "multiaddr", "multihash", - "multistream-select 0.10.2", - "parity-multiaddr 0.11.2", + "multistream-select 0.10.3", "parking_lot", "pin-project 1.0.7", "prost", @@ -3517,7 +3420,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.5", + "sha2", "smallvec", "thiserror", "unsigned-varint 0.7.0", @@ -3525,20 +3428,50 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-deflate" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "flate2", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", +] + [[package]] name = "libp2p-dns" -version = "0.27.0" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "async-std-resolver", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "log", + "smallvec", + "trust-dns-resolver", +] + +[[package]] +name = "libp2p-floodsub" +version = "0.30.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ + "cuckoofilter", + "fnv", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 
(git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-swarm", "log", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec", ] [[package]] name = "libp2p-gossipsub" -version = "0.28.0" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.32.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -3547,14 +3480,14 @@ dependencies = [ "fnv", "futures", "hex_fmt", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "libp2p-swarm", "log", "prost", "prost-build", "rand 0.7.3", "regex", - "sha2 0.9.5", + "sha2", "smallvec", "unsigned-varint 0.7.0", "wasm-timer", @@ -3562,28 +3495,73 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.27.0" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.30.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "smallvec", + "wasm-timer", +] + +[[package]] +name = "libp2p-kad" +version = "0.31.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ + "arrayvec 0.5.2", + "asynchronous-codec", + "bytes 1.0.1", + "either", + "fnv", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "libp2p-swarm", "log", "prost", "prost-build", + "rand 0.7.3", + "sha2", "smallvec", + "uint 0.9.1", + "unsigned-varint 0.7.0", + "void", "wasm-timer", ] +[[package]] +name = "libp2p-mdns" +version = "0.31.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "async-io", + "data-encoding", + "dns-parser", + "futures", + "if-watch", + "lazy_static", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-swarm", + "log", + "rand 0.8.4", + "smallvec", + "socket2 0.4.0", + "void", +] + [[package]] name = "libp2p-mplex" -version = "0.27.1" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "asynchronous-codec", "bytes 1.0.1", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "log", "nohash-hasher", "parking_lot", @@ -3594,33 +3572,117 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.29.0" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.32.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "bytes 1.0.1", 
"curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "log", "prost", "prost-build", - "rand 0.7.3", - "sha2 0.9.5", + "rand 0.8.4", + "sha2", "snow", "static_assertions", "x25519-dalek", "zeroize", ] +[[package]] +name = "libp2p-ping" +version = "0.30.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-swarm", + "log", + "rand 0.7.3", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "asynchronous-codec", + "bytes 1.0.1", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "log", + "prost", + "prost-build", + "unsigned-varint 0.7.0", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.21.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "futures", + "log", + "pin-project 1.0.7", + "rand 0.7.3", + "salsa20 0.8.0", + "sha3", +] + +[[package]] +name = "libp2p-relay" +version = "0.3.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "asynchronous-codec", + "bytes 1.0.1", + "futures", + "futures-timer", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-swarm", + "log", + "pin-project 1.0.7", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-request-response" +version = "0.12.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "async-trait", + "bytes 1.0.1", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "libp2p-swarm", + "log", + "lru", + "minicbor", + "rand 0.7.3", + "smallvec", + "unsigned-varint 0.7.0", + "wasm-timer", +] + [[package]] name = "libp2p-swarm" -version = "0.27.2" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.30.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "either", "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "log", "rand 0.7.3", "smallvec", @@ -3630,8 +3692,8 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.22.0" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.24.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "quote", "syn", @@ -3639,8 +3701,8 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.27.1" -source = 
"git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "async-io", "futures", @@ -3649,21 +3711,45 @@ dependencies = [ "if-watch", "ipnet", "libc", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "log", - "socket2 0.3.19", + "socket2 0.4.0", "tokio 1.8.1", ] +[[package]] +name = "libp2p-uds" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "async-std", + "futures", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "log", +] + +[[package]] +name = "libp2p-wasm-ext" +version = "0.29.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" +dependencies = [ + "futures", + "js-sys", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", + "parity-send-wrapper", + "wasm-bindgen", + "wasm-bindgen-futures", +] + [[package]] name = "libp2p-websocket" -version = "0.28.0" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.30.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "log", "quicksink", "rw-stream-sink", @@ -3674,32 +3760,16 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.30.1" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.33.0" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "futures", - "libp2p-core 0.27.1", + "libp2p-core 0.29.0 (git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit)", "parking_lot", "thiserror", "yamux", ] -[[package]] -name = "libsecp256k1" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" -dependencies = [ - "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg 0.2.0", - "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.5.0" @@ -3708,14 +3778,14 @@ checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" dependencies = [ "arrayref", "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg 0.3.0", + "digest", + "hmac-drbg", "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", "rand 0.7.3", "serde", - "sha2 0.9.5", + "sha2", "typenum", ] @@ -3726,8 +3796,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee11012b293ea30093c129173cac4335513064094619f4639a25b310fd33c11" dependencies = [ "crunchy", - "digest 0.9.0", - "subtle 2.4.0", + "digest", + "subtle", ] [[package]] @@ -3901,6 +3971,15 @@ dependencies = [ "hashbrown 0.9.1", ] +[[package]] +name = 
"lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "lru_cache" version = "0.1.0" @@ -3939,6 +4018,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -4009,6 +4094,26 @@ dependencies = [ "unicase", ] +[[package]] +name = "minicbor" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51aa5bb0ca22415daca596a227b507f880ad1b2318a87fa9325312a5d285ca0d" +dependencies = [ + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f2b9e8883d58e34b18facd16c4564a77ea50fce028ad3d0ee6753440e37acc8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "miniz_oxide" version = "0.4.4" @@ -4062,17 +4167,35 @@ dependencies = [ "tokio 1.8.1", ] +[[package]] +name = "multiaddr" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ee4ea82141951ac6379f964f71b20876d43712bea8faf6dd1a375e08a46499" +dependencies = [ + "arrayref", + "bs58", + "byteorder", + "data-encoding", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.0", + "url", +] + [[package]] name = "multihash" -version = "0.13.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" +checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" dependencies = [ - "digest 0.9.0", - "generic-array 0.14.4", + "digest", + "generic-array", "multihash-derive", - "sha2 0.9.5", - "unsigned-varint 0.5.1", + "sha2", + "unsigned-varint 0.7.0", ] [[package]] @@ -4115,8 +4238,9 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.10.1" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" dependencies = [ "bytes 1.0.1", "futures", @@ -4128,9 +4252,8 @@ dependencies = [ [[package]] name = "multistream-select" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" +version = "0.10.3" +source = "git+https://github.com/pawanjay176/rust-libp2p?branch=total-connection-limit#75fd53ec5407a58ae1ff600fd1c68ea49079364a" dependencies = [ "bytes 1.0.1", "futures", @@ -4163,7 +4286,6 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", - "discv5", "environment", "error-chain", "eth2_libp2p", @@ -4371,12 +4493,6 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -4449,41 +4565,6 @@ dependencies = [ "types", ] -[[package]] -name = "parity-multiaddr" -version = "0.11.1" -source = "git+https://github.com/pawanjay176/rust-libp2p?branch=message-id-gossipsub#388181aae3dacc8e6cf58aca2206e0a3dec41a6c" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.0", - "url", -] - -[[package]] -name = "parity-multiaddr" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.0", - "url", -] - [[package]] name = "parity-scale-codec" version = "1.3.7" @@ -4522,6 +4603,12 @@ dependencies = [ "syn", ] +[[package]] +name = "parity-send-wrapper" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" + [[package]] name = "parking" version = "2.0.0" @@ -4706,9 +4793,9 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8be10f7485c8a323ea100b20d6052c27cf5968f08f8e3a56ee9f0cf38ebd3d" +checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" [[package]] name = "plotters-svg" @@ -4734,22 +4821,12 @@ dependencies = [ [[package]] name = "poly1305" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" -dependencies = [ - "cpuid-bool", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.4.5" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "4fe800695325da85083cd23b56826fccb2e2dc29b218e7811a6f33bc93f414be" dependencies = [ - "cpuid-bool", - "opaque-debug 0.3.0", + "cpufeatures", + "opaque-debug", "universal-hash", ] @@ -4761,7 +4838,7 @@ checksum = "e597450cbf209787f0e6de80bf3795c6b2356a380ee87837b545aded8dbc1823" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "opaque-debug 0.3.0", + "opaque-debug", "universal-hash", ] @@ -4887,9 +4964,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ "bytes 1.0.1", "prost-derive", @@ -4897,13 +4974,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ "bytes 1.0.1", "heck", - "itertools 0.9.0", + "itertools 0.10.1", "log", "multimap", "petgraph", @@ -4915,12 +4992,12 @@ dependencies = [ [[package]] name = "prost-derive" 
-version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", - "itertools 0.9.0", + "itertools 0.10.1", "proc-macro2", "quote", "syn", @@ -4928,9 +5005,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ "bytes 1.0.1", "prost", @@ -5355,7 +5432,17 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.7.0", +] + +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", ] [[package]] @@ -5459,6 +5546,15 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.3", +] + [[package]] name = "rustls" version = "0.19.1" @@ -5514,6 +5610,15 @@ dependencies = [ "cipher 0.2.5", ] +[[package]] +name = "salsa20" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c7c5f10864beba947e1a1b43f3ef46c8cc58d1c2ae549fa471713e8ff60787a" +dependencies = [ + "cipher 0.3.0", +] + [[package]] name = "same-file" version = "1.0.6" @@ -5562,8 +5667,8 @@ checksum = "8da492dab03f925d977776a0b7233d7b934d6dc2b94faead48928e2e9bacedb9" dependencies = [ "hmac 0.10.1", "pbkdf2 0.6.0", - "salsa20", - "sha2 0.9.5", + "salsa20 0.7.2", + "sha2", ] [[package]] @@ -5635,6 +5740,12 @@ dependencies = [ "semver-parser 0.10.2", ] +[[package]] +name = "semver" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f3aac57ee7f3272d8395c6e4f502f434f0e289fcd62876f70daa008c20dcabe" + [[package]] name = "semver-parser" version = "0.7.0" @@ -5760,11 +5871,11 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16" dependencies = [ - "block-buffer 0.9.0", + "block-buffer", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest", + "opaque-debug", ] [[package]] @@ -5773,29 +5884,17 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" dependencies = [ - "block-buffer 0.9.0", + "block-buffer", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest", + "opaque-debug", ] [[package]] 
@@ -5804,10 +5903,10 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", + "block-buffer", + "digest", "keccak", - "opaque-debug 0.3.0", + "opaque-debug", ] [[package]] @@ -5844,7 +5943,7 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" dependencies = [ - "digest 0.9.0", + "digest", "rand_core 0.6.3", ] @@ -5913,7 +6012,7 @@ dependencies = [ "serde", "serde_derive", "slog", - "sloggers 2.0.0", + "sloggers 2.0.1", "tempfile", "tree_hash", "tree_hash_derive", @@ -6054,9 +6153,9 @@ dependencies = [ [[package]] name = "sloggers" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ee08a52260ed355f96069884bf8873f2439231f8754cbd545291d647ebbd75" +checksum = "7071b1119e436e93157c2e9e134138d9d8716dfe5e2f472500119bcbe4f45a4e" dependencies = [ "chrono", "libc", @@ -6108,19 +6207,19 @@ checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451" [[package]] name = "snow" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" +checksum = "6142f7c25e94f6fd25a32c3348ec230df9109b463f59c8c7acc4bd34936babb7" dependencies = [ - "aes-gcm 0.7.0", + "aes-gcm", "blake2", "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.4", + "rand_core 0.6.3", "ring", - "rustc_version 0.2.3", - "sha2 0.9.5", - "subtle 2.4.0", + "rustc_version 0.3.3", + "sha2", + "subtle", "x25519-dalek", ] @@ -6304,16 +6403,6 @@ dependencies = [ "types", ] -[[package]] -name = "stream-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" -dependencies = [ - "block-cipher", - "generic-array 0.14.4", -] - [[package]] name = "string_cache" version = "0.8.1" @@ -6361,15 +6450,9 @@ dependencies = [ [[package]] name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" @@ -6406,9 +6489,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" dependencies = [ "proc-macro2", "quote", @@ -6506,18 +6589,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" +checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.25" +version = "1.0.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" +checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" dependencies = [ "proc-macro2", "quote", @@ -6616,7 +6699,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.5", + "sha2", "thiserror", "unicode-normalization", "zeroize", @@ -6723,9 +6806,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2", "quote", @@ -6744,9 +6827,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ "futures-core", "pin-project-lite 0.2.7", @@ -6925,6 +7008,51 @@ dependencies = [ "syn", ] +[[package]] +name = "trust-dns-proto" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "lazy_static", + "log", + "rand 0.8.4", + "smallvec", + "thiserror", + "tinyvec", + "tokio 1.8.1", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "parking_lot", + "resolv-conf", + "smallvec", + "thiserror", + "tokio 1.8.1", + "trust-dns-proto", +] + [[package]] name = "try-lock" version = "0.2.3" @@ -7076,9 +7204,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" @@ -7098,16 +7226,10 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "generic-array", + "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" - [[package]] name = "unsigned-varint" version = "0.6.0" @@ -7126,6 +7248,8 @@ checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" dependencies = [ "asynchronous-codec", "bytes 1.0.1", + "futures-io", + "futures-util", ] [[package]] @@ -7189,7 +7313,7 @@ dependencies = [ "hyper", "lazy_static", "libc", - "libsecp256k1 0.5.0", + "libsecp256k1", "lighthouse_metrics", "lighthouse_version", "lockfile", @@ -7486,7 +7610,7 @@ dependencies = [ "base64 0.13.0", "bytes 1.0.1", 
"derive_more", - "ethabi 14.0.0", + "ethabi 14.1.0", "ethereum-types 0.11.0", "futures", "futures-timer", @@ -7608,6 +7732,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi", +] + [[package]] name = "winreg" version = "0.7.0" @@ -7660,15 +7793,15 @@ dependencies = [ [[package]] name = "yamux" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" +checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ "futures", "log", "nohash-hasher", "parking_lot", - "rand 0.7.3", + "rand 0.8.4", "static_assertions", ] diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 4986151c038..ceb8cb8abb9 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -27,7 +27,7 @@ eth2_wallet = { path = "../crypto/eth2_wallet" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } rand = "0.7.3" validator_dir = { path = "../common/validator_dir" } -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } slashing_protection = { path = "../validator_client/slashing_protection" } diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f170ee86f51..2b6feec5aad 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -26,7 +26,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr slog-term = "2.6.0" slog-async = "2.5.0" ctrlc = { version = "3.1.6", features = ["termination"] } -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } exit-future = "0.2.0" dirs = "3.0.1" logging = { path = "../common/logging" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index a7cbc2061fb..21f6711960c 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -40,7 +40,7 @@ eth2_ssz_derive = "0.1.0" state_processing = { path = "../../consensus/state_processing" } tree_hash = "0.1.1" types = { path = "../../consensus/types" } -tokio = "1.1.0" +tokio = "1.7.1" eth1 = { path = "../eth1" } futures = "0.3.7" genesis = { path = "../genesis" } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3e67624461d..b9aa725ad74 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -26,7 +26,7 @@ error-chain = "0.12.4" serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace"] } slog-async = "2.5.0" -tokio = "1.1.0" +tokio = "1.7.1" dirs = "3.0.1" futures = "0.3.7" reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 8b80eee1098..730af0bf503 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -26,7 +26,7 @@ tree_hash = "0.1.1" eth2_hashing = "0.1.0" parking_lot = "0.11.0" slog = "2.5.2" -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } libflate = "1.0.2" lighthouse_metrics = { path = 
"../../common/lighthouse_metrics"} diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index df570f647d3..344d93a6728 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -discv5 = { version = "0.1.0-beta.5", features = ["libp2p"] } +discv5 = { version = "0.1.0-beta.7", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } @@ -16,7 +16,7 @@ eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } -tokio = { version = "1.1.0", features = ["time", "macros"] } +tokio = { version = "1.7.1", features = ["time", "macros"] } futures = "0.3.7" futures-io = "0.3.7" error-chain = "0.12.4" @@ -43,15 +43,15 @@ strum = { version = "0.20", features = ["derive"] } superstruct = "0.2.0" [dependencies.libp2p] -# version = "0.35.1" +#version = "0.39.1" +#default-features = false # TODO: Update once https://github.com/libp2p/rust-libp2p/pull/2103 is merged. git = "https://github.com/pawanjay176/rust-libp2p" -branch = "message-id-gossipsub" -default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"] +branch = "total-connection-limit" +features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] [dev-dependencies] -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } slog-term = "2.6.0" slog-async = "2.5.0" tempfile = "3.1.0" diff --git a/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs b/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs index 80313938c1d..71a3953ece1 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs @@ -17,6 +17,23 @@ const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05; const PROPOSER_SLASHING_WEIGHT: f64 = 0.05; const ATTESTER_SLASHING_WEIGHT: f64 = 0.05; +/// The time window (seconds) that we expect messages to be forwarded to us in the mesh. +const MESH_MESSAGE_DELIVERIES_WINDOW: u64 = 2; + +// Const as this is used in the peer manager to prevent gossip from disconnecting peers. +pub const GREYLIST_THRESHOLD: f64 = -16000.0; + +/// Builds the peer score thresholds. 
+pub fn lighthouse_gossip_thresholds() -> PeerScoreThresholds { + PeerScoreThresholds { + gossip_threshold: -4000.0, + publish_threshold: -8000.0, + graylist_threshold: GREYLIST_THRESHOLD, + accept_px_threshold: 100.0, + opportunistic_graft_threshold: 5.0, + } +} + pub struct PeerScoreSettings { slot: Duration, epoch: Duration, @@ -75,7 +92,7 @@ impl PeerScoreSettings { decay_to_zero: self.decay_to_zero, retain_score: self.epoch * 100, app_specific_weight: 1.0, - ip_colocation_factor_threshold: 3.0, + ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP behaviour_penalty_threshold: 6.0, behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10), ..Default::default() @@ -313,10 +330,10 @@ impl PeerScoreSettings { cap_factor * t_params.mesh_message_deliveries_threshold }; t_params.mesh_message_deliveries_activation = activation_window; - t_params.mesh_message_deliveries_window = Duration::from_secs(2); + t_params.mesh_message_deliveries_window = + Duration::from_secs(MESH_MESSAGE_DELIVERIES_WINDOW); t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay; - t_params.mesh_message_deliveries_weight = -self.max_positive_score - / (t_params.topic_weight * t_params.mesh_message_deliveries_threshold.powi(2)); + t_params.mesh_message_deliveries_weight = -t_params.topic_weight; t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight; if decay_slots >= current_slot.as_u64() { t_params.mesh_message_deliveries_threshold = 0.0; diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs deleted file mode 100644 index f849114f311..00000000000 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs +++ /dev/null @@ -1,368 +0,0 @@ -use crate::behaviour::Gossipsub; -use crate::rpc::*; -use libp2p::{ - core::either::{EitherError, EitherOutput}, - core::upgrade::{EitherUpgrade, InboundUpgrade, OutboundUpgrade, SelectUpgrade, UpgradeError}, - identify::Identify, - swarm::{ - protocols_handler::{ - KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, - }, - NegotiatedSubstream, NetworkBehaviour, ProtocolsHandler, - }, -}; -use std::task::{Context, Poll}; -use types::EthSpec; - -/* Auxiliary types for simplicity */ -type GossipHandler = ::ProtocolsHandler; -type RPCHandler = as NetworkBehaviour>::ProtocolsHandler; -type IdentifyHandler = ::ProtocolsHandler; - -/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner. -pub(super) struct DelegatingHandler { - /// Handler for the Gossipsub protocol. - gossip_handler: GossipHandler, - /// Handler for the RPC protocol. - rpc_handler: RPCHandler, - /// Handler for the Identify protocol. - identify_handler: IdentifyHandler, -} - -impl DelegatingHandler { - pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC, identify: &mut Identify) -> Self { - DelegatingHandler { - gossip_handler: gossipsub.new_handler(), - rpc_handler: rpc.new_handler(), - identify_handler: identify.new_handler(), - } - } - - /// Gives mutable access to the rpc handler. - pub fn rpc_mut(&mut self) -> &mut RPCHandler { - &mut self.rpc_handler - } - - /// Gives access to the rpc handler. - pub fn rpc(&self) -> &RPCHandler { - &self.rpc_handler - } - - /// Gives access to identify's handler. - pub fn _identify(&self) -> &IdentifyHandler { - &self.identify_handler - } -} - -/// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers. -/// Simply delegated to the corresponding behaviour's handler. 
-#[derive(Debug, Clone)] -pub enum DelegateIn { - Gossipsub(::InEvent), - RPC( as ProtocolsHandler>::InEvent), - Identify(::InEvent), -} - -/// Wrapper around the `ProtocolsHandler::OutEvent` types of the handlers. -/// Simply delegated to the corresponding behaviour's handler. -pub enum DelegateOut { - Gossipsub(::OutEvent), - RPC( as ProtocolsHandler>::OutEvent), - Identify(Box<::OutEvent>), -} - -/// Wrapper around the `ProtocolsHandler::Error` types of the handlers. -/// Simply delegated to the corresponding behaviour's handler. -#[derive(Debug)] -pub enum DelegateError { - Gossipsub(::Error), - RPC( as ProtocolsHandler>::Error), - Identify(::Error), - Disconnected, -} - -impl std::error::Error for DelegateError {} - -impl std::fmt::Display for DelegateError { - fn fmt( - &self, - formater: &mut std::fmt::Formatter<'_>, - ) -> std::result::Result<(), std::fmt::Error> { - match self { - DelegateError::Gossipsub(err) => err.fmt(formater), - DelegateError::RPC(err) => err.fmt(formater), - DelegateError::Identify(err) => err.fmt(formater), - DelegateError::Disconnected => write!(formater, "Disconnected"), - } - } -} - -pub type DelegateInProto = SelectUpgrade< - ::InboundProtocol, - SelectUpgrade< - as ProtocolsHandler>::InboundProtocol, - ::InboundProtocol, - >, ->; - -pub type DelegateOutProto = EitherUpgrade< - ::OutboundProtocol, - EitherUpgrade< - as ProtocolsHandler>::OutboundProtocol, - ::OutboundProtocol, - >, ->; - -pub type DelegateOutInfo = EitherOutput< - ::OutboundOpenInfo, - EitherOutput< - as ProtocolsHandler>::OutboundOpenInfo, - ::OutboundOpenInfo, - >, ->; - -impl ProtocolsHandler for DelegatingHandler { - type InEvent = DelegateIn; - type OutEvent = DelegateOut; - type Error = DelegateError; - type InboundProtocol = DelegateInProto; - type OutboundProtocol = DelegateOutProto; - type OutboundOpenInfo = DelegateOutInfo; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - let gossip_proto = self.gossip_handler.listen_protocol(); - let rpc_proto = self.rpc_handler.listen_protocol(); - let identify_proto = self.identify_handler.listen_protocol(); - - let timeout = *gossip_proto - .timeout() - .max(rpc_proto.timeout()) - .max(identify_proto.timeout()); - - let select = SelectUpgrade::new( - gossip_proto.into_upgrade().1, - SelectUpgrade::new(rpc_proto.into_upgrade().1, identify_proto.into_upgrade().1), - ); - - SubstreamProtocol::new(select, ()).with_timeout(timeout) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output, - _info: Self::InboundOpenInfo, - ) { - match out { - // Gossipsub - EitherOutput::First(out) => { - self.gossip_handler.inject_fully_negotiated_inbound(out, ()) - } - // RPC - EitherOutput::Second(EitherOutput::First(out)) => { - self.rpc_handler.inject_fully_negotiated_inbound(out, ()) - } - // Identify - EitherOutput::Second(EitherOutput::Second(out)) => self - .identify_handler - .inject_fully_negotiated_inbound(out, ()), - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - protocol: >::Output, - info: Self::OutboundOpenInfo, - ) { - match (protocol, info) { - // Gossipsub - (EitherOutput::First(protocol), EitherOutput::First(info)) => self - .gossip_handler - .inject_fully_negotiated_outbound(protocol, info), - // RPC - ( - EitherOutput::Second(EitherOutput::First(protocol)), - EitherOutput::Second(EitherOutput::First(info)), - ) => self - .rpc_handler - .inject_fully_negotiated_outbound(protocol, info), - // Identify - ( - EitherOutput::Second(EitherOutput::Second(protocol)), - 
EitherOutput::Second(EitherOutput::Second(())), - ) => self - .identify_handler - .inject_fully_negotiated_outbound(protocol, ()), - // Reaching here means we got a protocol and info for different behaviours - _ => unreachable!("output and protocol don't match"), - } - } - - fn inject_event(&mut self, event: Self::InEvent) { - match event { - DelegateIn::Gossipsub(ev) => self.gossip_handler.inject_event(ev), - DelegateIn::RPC(ev) => self.rpc_handler.inject_event(ev), - DelegateIn::Identify(()) => self.identify_handler.inject_event(()), - } - } - - fn inject_dial_upgrade_error( - &mut self, - info: Self::OutboundOpenInfo, - error: ProtocolsHandlerUpgrErr< - >::Error, - >, - ) { - match info { - // Gossipsub - EitherOutput::First(info) => match error { - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { - self.gossip_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - ) - } - ProtocolsHandlerUpgrErr::Timer => self - .gossip_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer), - ProtocolsHandlerUpgrErr::Timeout => self - .gossip_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))) => { - self.gossip_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), - ) - } - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => { - unreachable!("info and error don't match") - } - }, - // RPC - EitherOutput::Second(EitherOutput::First(info)) => match error { - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { - self.rpc_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - ) - } - ProtocolsHandlerUpgrErr::Timer => self - .rpc_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer), - ProtocolsHandlerUpgrErr::Timeout => self - .rpc_handler - .inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B( - EitherError::A(err), - ))) => self.rpc_handler.inject_dial_upgrade_error( - info, - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => { - unreachable!("info and error don't match") - } - }, - // Identify - EitherOutput::Second(EitherOutput::Second(())) => match error { - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { - self.identify_handler.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - ) - } - ProtocolsHandlerUpgrErr::Timer => self - .identify_handler - .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), - ProtocolsHandlerUpgrErr::Timeout => self - .identify_handler - .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B( - EitherError::B(err), - ))) => self.identify_handler.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => { - unreachable!("info and error don't match") - } - }, - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - self.gossip_handler - .connection_keep_alive() - .max(self.rpc_handler.connection_keep_alive()) - .max(self.identify_handler.connection_keep_alive()) - } - - #[allow(clippy::type_complexity)] - fn poll( - &mut self, 
- cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, - > { - match self.gossip_handler.poll(cx) { - Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Gossipsub(event))); - } - Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Gossipsub( - event, - ))); - } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_upgrade(EitherUpgrade::A) - .map_info(EitherOutput::First), - }); - } - Poll::Pending => (), - }; - - match self.rpc_handler.poll(cx) { - Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::RPC(event))); - } - Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::RPC(event))); - } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u))) - .map_info(|info| EitherOutput::Second(EitherOutput::First(info))), - }); - } - Poll::Pending => (), - }; - - match self.identify_handler.poll(cx) { - Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify( - Box::new(event), - ))); - } - Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event))); - } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u))) - .map_info(|_| EitherOutput::Second(EitherOutput::Second(()))), - }); - } - Poll::Pending => (), - }; - - Poll::Pending - } -} diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs deleted file mode 100644 index d587ea6549a..00000000000 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs +++ /dev/null @@ -1,132 +0,0 @@ -use crate::behaviour::Gossipsub; -use crate::rpc::*; -use delegate::DelegatingHandler; -pub(super) use delegate::{ - DelegateError, DelegateIn, DelegateInProto, DelegateOut, DelegateOutInfo, DelegateOutProto, -}; -use libp2p::{ - core::upgrade::{InboundUpgrade, OutboundUpgrade}, - identify::Identify, - swarm::protocols_handler::{ - KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, - }, - swarm::{NegotiatedSubstream, ProtocolsHandler}, -}; -use std::task::{Context, Poll}; -use types::EthSpec; - -mod delegate; - -/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner. -pub struct BehaviourHandler { - /// Handler combining all sub behaviour's handlers. - delegate: DelegatingHandler, - /// Flag indicating if the handler is shutting down. 
- shutting_down: bool, -} - -impl BehaviourHandler { - pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC, identify: &mut Identify) -> Self { - BehaviourHandler { - delegate: DelegatingHandler::new(gossipsub, rpc, identify), - shutting_down: false, - } - } -} - -#[derive(Clone)] -pub enum BehaviourHandlerIn { - Delegate(DelegateIn), - /// Start the shutdown process. - Shutdown(Option<(RequestId, OutboundRequest)>), -} - -impl ProtocolsHandler for BehaviourHandler { - type InEvent = BehaviourHandlerIn; - type OutEvent = DelegateOut; - type Error = DelegateError; - type InboundProtocol = DelegateInProto; - type OutboundProtocol = DelegateOutProto; - type OutboundOpenInfo = DelegateOutInfo; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - self.delegate.listen_protocol() - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output, - _info: Self::InboundOpenInfo, - ) { - self.delegate.inject_fully_negotiated_inbound(out, ()) - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - info: Self::OutboundOpenInfo, - ) { - self.delegate.inject_fully_negotiated_outbound(out, info) - } - - fn inject_event(&mut self, event: Self::InEvent) { - match event { - BehaviourHandlerIn::Delegate(delegated_ev) => self.delegate.inject_event(delegated_ev), - /* Events coming from the behaviour */ - BehaviourHandlerIn::Shutdown(last_message) => { - self.shutting_down = true; - self.delegate.rpc_mut().shutdown(last_message); - } - } - } - - fn inject_dial_upgrade_error( - &mut self, - info: Self::OutboundOpenInfo, - err: ProtocolsHandlerUpgrErr< - >::Error, - >, - ) { - self.delegate.inject_dial_upgrade_error(info, err) - } - - // We don't use the keep alive to disconnect. This is handled in the poll - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes - } - - #[allow(clippy::type_complexity)] - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, - > { - // Disconnect if the sub-handlers are ready. - // Currently we only respect the RPC handler. 
- if self.shutting_down && KeepAlive::No == self.delegate.rpc().connection_keep_alive() { - return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Disconnected)); - } - - match self.delegate.poll(cx) { - Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) - } - Poll::Ready(ProtocolsHandlerEvent::Close(err)) => { - return Poll::Ready(ProtocolsHandlerEvent::Close(err)) - } - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }); - } - Poll::Pending => (), - } - - Poll::Pending - } -} diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index 8e1f036a7b5..88d7e294d8c 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -1,38 +1,35 @@ +use crate::behaviour::gossipsub_scoring_parameters::{ + lighthouse_gossip_thresholds, PeerScoreSettings, +}; +use crate::config::gossipsub_config; +use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; +use crate::peer_manager::{ + score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, +}; use crate::rpc::*; use crate::service::METADATA_FILENAME; use crate::types::{ - subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, + subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; -use crate::{behaviour::gossipsub_scoring_parameters::PeerScoreSettings, Subnet}; -use crate::{ - config::gossipsub_config, - peer_manager::{ - score::{PeerAction, ReportSource}, - ConnectionDirection, PeerManager, PeerManagerEvent, - }, -}; use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; use futures::prelude::*; -use handler::{BehaviourHandler, BehaviourHandlerIn, DelegateIn, DelegateOut}; use libp2p::{ core::{ - connection::{ConnectedPoint, ConnectionId, ListenerId}, - identity::Keypair, - Multiaddr, + connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr, }, gossipsub::{ subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, - MessageAuthenticity, MessageId, PeerScoreThresholds, + MessageAuthenticity, MessageId, }, - identify::{Identify, IdentifyEvent}, + identify::{Identify, IdentifyConfig, IdentifyEvent}, swarm::{ - AddressScore, NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler, - PollParameters, ProtocolsHandler, + AddressScore, DialPeerCondition, NetworkBehaviourAction as NBAction, + NetworkBehaviourEventProcess, PollParameters, }, - PeerId, + NetworkBehaviour, PeerId, }; use slog::{crit, debug, o, trace, warn}; use ssz::Encode; @@ -50,11 +47,9 @@ use types::{ ChainSpec, EnrForkId, EthSpec, ForkContext, SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, }; -mod gossipsub_scoring_parameters; -mod handler; +pub mod gossipsub_scoring_parameters; const MAX_IDENTIFY_ADDRESSES: usize = 10; -pub const GOSSIPSUB_GREYLIST_THRESHOLD: f64 = -16000.0; /// Identifier of requests sent by a peer. pub type PeerRequestId = (ConnectionId, SubstreamId); @@ -66,11 +61,15 @@ pub type Gossipsub = BaseGossipsub; #[derive(Debug)] pub enum BehaviourEvent { /// We have successfully dialed and connected to a peer. 
- PeerDialed(PeerId), + PeerConnectedOutgoing(PeerId), /// A peer has successfully dialed and connected to us. - PeerConnected(PeerId), + PeerConnectedIncoming(PeerId), /// A peer has disconnected. PeerDisconnected(PeerId), + /// The peer needs to be banned. + PeerBanned(PeerId), + /// The peer has been unbanned. + PeerUnbanned(PeerId), /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. @@ -108,49 +107,77 @@ pub enum BehaviourEvent { StatusPeer(PeerId), } +/// Internal type to pass messages from sub-behaviours to the poll of the global behaviour to be +/// specified as an NBAction. +enum InternalBehaviourMessage { + /// Dial a Peer. + DialPeer(PeerId), + /// The socket has been updated. + SocketUpdated(Multiaddr), +} + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. +#[derive(NetworkBehaviour)] +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] pub struct Behaviour { + /* Sub-Behaviours */ /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. eth2_rpc: RPC, + /// Discv5 Discovery protocol. + discovery: Discovery, /// Keep regular connection to peers and disconnect if absent. // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. identify: Identify, + + /* Auxiliary Fields */ /// The peer manager that keeps track of peer's reputation and status. + #[behaviour(ignore)] peer_manager: PeerManager, /// The output events generated by this behaviour to be consumed in the swarm poll. + #[behaviour(ignore)] events: VecDeque>, - /// Queue of peers to disconnect and an optional reason for the disconnection. - peers_to_dc: VecDeque<(PeerId, Option)>, + /// Internal behaviour events, the NBAction type is composed of sub-behaviours, so we use a + /// custom type here to avoid having to specify the concrete type. + #[behaviour(ignore)] + internal_events: VecDeque, /// A collections of variables accessible outside the network service. + #[behaviour(ignore)] network_globals: Arc>, /// Keeps track of the current EnrForkId for upgrading gossipsub topics. // NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick // lookups for every gossipsub message send. + #[behaviour(ignore)] enr_fork_id: EnrForkId, - /// The waker for the current thread. + /// The waker for the current task. This is used to wake the task when events are added to the + /// queue. + #[behaviour(ignore)] waker: Option, - /// Directory where metadata is stored + /// Directory where metadata is stored. + #[behaviour(ignore)] network_dir: PathBuf, + #[behaviour(ignore)] fork_context: Arc, - /// Logger for behaviour actions. - log: slog::Logger, - + /// Gossipsub score parameters. + #[behaviour(ignore)] score_settings: PeerScoreSettings, - /// The interval for updating gossipsub scores + #[behaviour(ignore)] update_gossipsub_scores: tokio::time::Interval, + /// Logger for behaviour actions. + #[behaviour(ignore)] + log: slog::Logger, } /// Implements the combined behaviour for the libp2p service. 
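The hand-rolled `BehaviourHandler`/`DelegatingHandler` deleted above is replaced by libp2p's derive-based composition: the `#[derive(NetworkBehaviour)]` attribute on the `Behaviour` struct generates the combined handler, and each sub-behaviour's events are consumed through `NetworkBehaviourEventProcess` implementations added further down in this file. A minimal sketch of that pattern, assuming the libp2p 0.39-era API this PR targets; `ExampleBehaviour` and the `Ping` sub-behaviour are purely illustrative, not Lighthouse code:

use libp2p::ping::{Ping, PingEvent};
use libp2p::swarm::NetworkBehaviourEventProcess;
use libp2p::NetworkBehaviour;

// The derive macro composes the sub-behaviours into one NetworkBehaviour; one
// NetworkBehaviourEventProcess impl per sub-behaviour replaces the manual event
// delegation the deleted delegate.rs used to perform.
#[derive(NetworkBehaviour)]
struct ExampleBehaviour {
    ping: Ping,
}

impl NetworkBehaviourEventProcess<PingEvent> for ExampleBehaviour {
    fn inject_event(&mut self, event: PingEvent) {
        // Handle the sub-behaviour's output event here.
        println!("ping event: {:?}", event);
    }
}

Lighthouse additionally sets `out_event = "BehaviourEvent"` and `poll_method = "poll"` on the derive, as shown above, so events and internal actions can be surfaced to the swarm rather than handled entirely inside the behaviour.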
impl Behaviour { pub async fn new( local_key: &Keypair, - mut net_conf: NetworkConfig, + mut config: NetworkConfig, network_globals: Arc>, log: &slog::Logger, fork_context: Arc, @@ -158,20 +185,24 @@ impl Behaviour { ) -> error::Result { let behaviour_log = log.new(o!()); - let identify = if net_conf.private { - Identify::new( - "".into(), + // Set up the Identify Behaviour + let identify_config = if config.private { + IdentifyConfig::new( "".into(), local_key.public(), // Still send legitimate public key ) } else { - Identify::new( - "lighthouse/libp2p".into(), - lighthouse_version::version_with_platform(), - local_key.public(), - ) + IdentifyConfig::new("eth2/1.0.0".into(), local_key.public()) + .with_agent_version(lighthouse_version::version_with_platform()) }; + // Build and start the discovery sub-behaviour + let mut discovery = + Discovery::new(local_key, &config, network_globals.clone(), log).await?; + // start searching for peers + discovery.discover_peers(); + + // Grab our local ENR FORK ID let enr_fork_id = network_globals .local_enr() .eth2() @@ -188,34 +219,28 @@ impl Behaviour { max_subscriptions_per_request: 100, //this is according to the current go implementation }; - net_conf.gs_config = gossipsub_config(fork_context.clone()); - - // Initialize the compression transform. - let snappy_transform = SnappyTransform::new(net_conf.gs_config.max_transmit_size()); + config.gs_config = gossipsub_config(fork_context.clone()); + // Build and configure the Gossipsub behaviour + let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, - net_conf.gs_config.clone(), + config.gs_config.clone(), filter, snappy_transform, ) .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; - //we don't know the number of active validators and the current slot yet + // Construct a set of gossipsub peer scoring parameters + // We don't know the number of active validators and the current slot yet let active_validators = TSpec::minimum_validator_count(); let current_slot = Slot::new(0); - let thresholds = PeerScoreThresholds { - gossip_threshold: -4000.0, - publish_threshold: -8000.0, - graylist_threshold: GOSSIPSUB_GREYLIST_THRESHOLD, - accept_px_threshold: 100.0, - opportunistic_graft_threshold: 5.0, - }; + let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(chain_spec, &net_conf.gs_config); + let score_settings = PeerScoreSettings::new(chain_spec, &config.gs_config); - //Prepare scoring parameters + // Prepare scoring parameters let params = score_settings.get_peer_score_params( active_validators, &thresholds, @@ -225,6 +250,7 @@ impl Behaviour { trace!(behaviour_log, "Using peer score params"; "params" => ?params); + // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); gossipsub @@ -232,17 +258,19 @@ impl Behaviour { .expect("Valid score params and thresholds"); Ok(Behaviour { - eth2_rpc: RPC::new(fork_context.clone(), log.clone()), + // Sub-behaviours gossipsub, - identify, - peer_manager: PeerManager::new(local_key, &net_conf, network_globals.clone(), log) - .await?, + eth2_rpc: RPC::new(fork_context.clone(), log.clone()), + discovery, + identify: Identify::new(identify_config), + // Auxiliary fields + peer_manager: PeerManager::new(&config, network_globals.clone(), log).await?, events: VecDeque::new(), - peers_to_dc: VecDeque::new(), + internal_events: 
VecDeque::new(), network_globals, enr_fork_id, waker: None, - network_dir: net_conf.network_dir.clone(), + network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, fork_context, @@ -250,54 +278,16 @@ impl Behaviour { }) } - pub fn update_gossipsub_parameters( - &mut self, - active_validators: usize, - current_slot: Slot, - ) -> error::Result<()> { - let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) = - self.score_settings - .get_dynamic_topic_params(active_validators, current_slot)?; - - let fork_digest = self.enr_fork_id.fork_digest; - let get_topic = |kind: GossipKind| -> Topic { - GossipTopic::new(kind, GossipEncoding::default(), fork_digest).into() - }; - - debug!(self.log, "Updating gossipsub score parameters"; - "active_validators" => active_validators); - trace!(self.log, "Updated gossipsub score parameters"; - "beacon_block_params" => ?beacon_block_params, - "beacon_aggregate_proof_params" => ?beacon_aggregate_proof_params, - "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, - ); - - self.gossipsub - .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; - - self.gossipsub.set_topic_params( - get_topic(GossipKind::BeaconAggregateAndProof), - beacon_aggregate_proof_params, - )?; - - for i in 0..self.score_settings.attestation_subnet_count() { - self.gossipsub.set_topic_params( - get_topic(GossipKind::Attestation(SubnetId::new(i))), - beacon_attestation_subnet_params.clone(), - )?; - } + /* Public Accessible Functions to interact with the behaviour */ - Ok(()) + /// Get a mutable reference to the underlying discovery sub-behaviour. + pub fn discovery_mut(&mut self) -> &mut Discovery { + &mut self.discovery } - /// Attempts to connect to a libp2p peer. - /// - /// This MUST be used over Swarm::dial() as this keeps track of the peer in the peer manager. - /// - /// All external dials, dial a multiaddr. This is currently unused but kept here in case any - /// part of lighthouse needs to connect to a peer_id in the future. - pub fn dial(&mut self, peer_id: &PeerId) { - self.peer_manager.dial_peer(peer_id); + /// Get a mutable reference to the peer manager. + pub fn peer_manager_mut(&mut self) -> &mut PeerManager { + &mut self.peer_manager } /// Returns the local ENR of the node. @@ -461,6 +451,48 @@ impl Behaviour { } } + /// Updates the current gossipsub scoring parameters based on the validator count and current + /// slot. 
+ pub fn update_gossipsub_parameters( + &mut self, + active_validators: usize, + current_slot: Slot, + ) -> error::Result<()> { + let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) = + self.score_settings + .get_dynamic_topic_params(active_validators, current_slot)?; + + let fork_digest = self.enr_fork_id.fork_digest; + let get_topic = |kind: GossipKind| -> Topic { + GossipTopic::new(kind, GossipEncoding::default(), fork_digest).into() + }; + + debug!(self.log, "Updating gossipsub score parameters"; + "active_validators" => active_validators); + trace!(self.log, "Updated gossipsub score parameters"; + "beacon_block_params" => ?beacon_block_params, + "beacon_aggregate_proof_params" => ?beacon_aggregate_proof_params, + "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, + ); + + self.gossipsub + .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; + + self.gossipsub.set_topic_params( + get_topic(GossipKind::BeaconAggregateAndProof), + beacon_aggregate_proof_params, + )?; + + for i in 0..self.score_settings.attestation_subnet_count() { + self.gossipsub.set_topic_params( + get_topic(GossipKind::Attestation(SubnetId::new(i))), + beacon_attestation_subnet_params.clone(), + )?; + } + + Ok(()) + } + /* Eth2 RPC behaviour functions */ /// Send a request to a peer over RPC. @@ -493,11 +525,6 @@ impl Behaviour { /* Peer management functions */ - /// Report a peer's action. - pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { - self.peer_manager.report_peer(peer_id, action, source) - } - /// Disconnects from a peer providing a reason. /// /// This will send a goodbye, disconnect and then ban the peer. @@ -508,23 +535,19 @@ impl Behaviour { /// Returns an iterator over all enr entries in the DHT. pub fn enr_entries(&mut self) -> Vec { - self.peer_manager.discovery_mut().table_entries_enr() + self.discovery.table_entries_enr() } /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { - self.peer_manager.discovery_mut().add_enr(enr); + self.discovery.add_enr(enr); } /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { - if let Err(e) = self - .peer_manager - .discovery_mut() - .update_enr_bitfield(subnet_id, value) - { + if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS @@ -533,16 +556,58 @@ impl Behaviour { /// Attempts to discover new peers for a given subnet. The `min_ttl` gives the time at which we /// would like to retain the peers for. 
- pub fn discover_subnet_peers(&mut self, subnet_subscriptions: Vec) { - self.peer_manager - .discover_subnet_peers(subnet_subscriptions) + pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { + // If discovery is not started or disabled, ignore the request + if !self.discovery.started { + return; + } + + let filtered: Vec = subnets_to_discover + .into_iter() + .filter(|s| { + // Extend min_ttl of connected peers on required subnets + if let Some(min_ttl) = s.min_ttl { + self.network_globals + .peers + .write() + .extend_peers_on_subnet(&s.subnet, min_ttl); + } + // Already have target number of peers, no need for subnet discovery + let peers_on_subnet = self + .network_globals + .peers + .read() + .good_peers_on_subnet(s.subnet) + .count(); + if peers_on_subnet >= TARGET_SUBNET_PEERS { + trace!( + self.log, + "Discovery query ignored"; + "subnet" => ?s.subnet, + "reason" => "Already connected to desired peers", + "connected_peers_on_subnet" => peers_on_subnet, + "target_subnet_peers" => TARGET_SUBNET_PEERS, + ); + false + // Queue an outgoing connection request to the cached peers that are on `s.subnet_id`. + // If we connect to the cached peers before the discovery query starts, then we potentially + // save a costly discovery query. + } else { + self.dial_cached_enrs_in_subnet(s.subnet); + true + } + }) + .collect(); + + // request the subnet query from discovery + if !filtered.is_empty() { + self.discovery.discover_subnet_peers(filtered); + } } /// Updates the local ENR's "eth2" field with the latest EnrForkId. pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { - self.peer_manager - .discovery_mut() - .update_eth2_enr(enr_fork_id.clone()); + self.discovery.update_eth2_enr(enr_fork_id.clone()); // update the local reference self.enr_fork_id = enr_fork_id; @@ -553,15 +618,13 @@ impl Behaviour { /// Updates the current meta data of the node to match the local ENR. fn update_metadata_bitfields(&mut self) { let local_attnets = self - .peer_manager - .discovery() + .discovery .local_enr() .attestation_bitfield::() .expect("Local discovery must have attestation bitfield"); let local_syncnets = self - .peer_manager - .discovery() + .discovery .local_enr() .sync_committee_bitfield::() .expect("Local discovery must have sync committee bitfield"); @@ -626,7 +689,102 @@ impl Behaviour { &mut self.peer_manager } - fn on_gossip_event(&mut self, event: GossipsubEvent) { + // RPC Propagation methods + /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. + fn propagate_response(&mut self, id: RequestId, peer_id: PeerId, response: Response) { + if !matches!(id, RequestId::Behaviour) { + self.add_event(BehaviourEvent::ResponseReceived { + peer_id, + id, + response, + }); + } + } + + /// Convenience function to propagate a request. + fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + self.add_event(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }); + } + + /// Adds an event to the queue waking the current task to process it. + fn add_event(&mut self, event: BehaviourEvent) { + self.events.push_back(event); + if let Some(waker) = &self.waker { + waker.wake_by_ref(); + } + } + + /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't + /// in Connected, Dialing or Banned state. 
+ fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { + let predicate = subnet_predicate::(vec![subnet], &self.log); + let peers_to_dial: Vec = self + .discovery + .cached_enrs() + .filter_map(|(peer_id, enr)| { + let peers = self.network_globals.peers.read(); + if predicate(enr) && peers.should_dial(peer_id) { + Some(*peer_id) + } else { + None + } + }) + .collect(); + for peer_id in peers_to_dial { + debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); + // Remove the ENR from the cache to prevent continual re-dialing on disconnects + self.discovery.remove_cached_enr(&peer_id); + self.internal_events + .push_back(InternalBehaviourMessage::DialPeer(peer_id)); + } + } + + /// Creates a whitelist topic filter that covers all possible topics using the given set of + /// possible fork digests. + fn create_whitelist_filter( + possible_fork_digests: Vec<[u8; 4]>, + attestation_subnet_count: u64, + sync_committee_subnet_count: u64, + ) -> WhitelistSubscriptionFilter { + let mut possible_hashes = HashSet::new(); + for fork_digest in possible_fork_digests { + let mut add = |kind| { + let topic: Topic = + GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); + possible_hashes.insert(topic.hash()); + }; + + use GossipKind::*; + add(BeaconBlock); + add(BeaconAggregateAndProof); + add(VoluntaryExit); + add(ProposerSlashing); + add(AttesterSlashing); + add(SignedContributionAndProof); + for id in 0..attestation_subnet_count { + add(Attestation(SubnetId::new(id))); + } + for id in 0..sync_committee_subnet_count { + add(SyncCommitteeMessage(SyncSubnetId::new(id))); + } + } + WhitelistSubscriptionFilter(possible_hashes) + } +} + +/* Behaviour Event Process Implementations + * + * These implementations dictate how to process each event that is emitted from each + * sub-behaviour. + */ + +// Gossipsub +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: GossipsubEvent) { match event { GossipsubEvent::Message { propagation_source, @@ -670,43 +828,25 @@ impl Behaviour { } } } +} - /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. - fn propagate_response(&mut self, id: RequestId, peer_id: PeerId, response: Response) { - if !matches!(id, RequestId::Behaviour) { - self.add_event(BehaviourEvent::ResponseReceived { - peer_id, - id, - response, - }); - } - } - - /// Convenience function to propagate a request. 
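The `add_event`/waker plumbing added earlier in this file follows a common pattern: events produced outside of `poll` are buffered, and the stored `Waker` is pinged so the swarm task re-polls the behaviour promptly instead of waiting for unrelated activity. A stripped-down sketch of the idea; the `EventQueue` type is illustrative only and not part of this PR:

use std::collections::VecDeque;
use std::task::{Context, Poll, Waker};

struct EventQueue<E> {
    events: VecDeque<E>,
    waker: Option<Waker>,
}

impl<E> EventQueue<E> {
    fn new() -> Self {
        EventQueue { events: VecDeque::new(), waker: None }
    }

    // Mirrors Behaviour::add_event: queue the event and wake the polling task.
    fn push(&mut self, event: E) {
        self.events.push_back(event);
        if let Some(waker) = &self.waker {
            waker.wake_by_ref();
        }
    }

    // Called from poll: remember the current task, then drain one event if any.
    fn poll_pop(&mut self, cx: &mut Context<'_>) -> Poll<E> {
        self.waker = Some(cx.waker().clone());
        match self.events.pop_front() {
            Some(event) => Poll::Ready(event),
            None => Poll::Pending,
        }
    }
}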
- fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { - self.add_event(BehaviourEvent::RequestReceived { - peer_id, - id, - request, - }); - } - - fn on_rpc_event(&mut self, message: RPCMessage) { - let peer_id = message.peer_id; +// RPC +impl NetworkBehaviourEventProcess> for Behaviour { + fn inject_event(&mut self, event: RPCMessage) { + let peer_id = event.peer_id; if !self.peer_manager.is_connected(&peer_id) { - //ignore this event debug!( self.log, - "Ignoring rpc message of disconnected peer"; + "Ignoring rpc message of disconnecting peer"; "peer" => %peer_id ); return; } - let handler_id = message.conn_id; + let handler_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated - match message.event { + match event.event { Err(handler_err) => { match handler_err { HandlerErr::Inbound { @@ -764,12 +904,10 @@ impl Behaviour { "reason" => %reason, "client" => %self.network_globals.client(&peer_id), ); - self.peers_to_dc.push_back((peer_id, None)); // NOTE: We currently do not inform the application that we are - // disconnecting here. - // The actual disconnection event will be relayed to the application. Ideally - // this time difference is short, but we may need to introduce a message to - // inform the application layer early. + // disconnecting here. The RPC handler will automatically + // disconnect for us. + // The actual disconnection event will be relayed to the application. } /* Protocols propagated to the Network */ InboundRequest::Status(msg) => { @@ -819,38 +957,127 @@ impl Behaviour { } } } +} - /// Consumes the events list when polled. - fn custom_poll( +// Discovery +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: DiscoveryEvent) { + match event { + DiscoveryEvent::SocketUpdated(socket_addr) => { + // A new UDP socket has been detected. + // Build a multiaddr to report to libp2p + let mut multiaddr = Multiaddr::from(socket_addr.ip()); + // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling + // should handle this. + multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp())); + self.internal_events + .push_back(InternalBehaviourMessage::SocketUpdated(multiaddr)); + } + DiscoveryEvent::QueryResult(results) => { + let to_dial_peers = self.peer_manager.peers_discovered(results); + for peer_id in to_dial_peers { + debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); + self.internal_events + .push_back(InternalBehaviourMessage::DialPeer(peer_id)); + } + } + } + } +} + +// Identify +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Received { peer_id, mut info } => { + if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { + debug!( + self.log, + "More than 10 addresses have been identified, truncating" + ); + info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); + } + // send peer info to the peer manager. + self.peer_manager.identify(&peer_id, &info); + + debug!(self.log, "Identified Peer"; "peer" => %peer_id, + "protocol_version" => info.protocol_version, + "agent_version" => info.agent_version, + "listening_ addresses" => ?info.listen_addrs, + "observed_address" => ?info.observed_addr, + "protocols" => ?info.protocols + ); + } + IdentifyEvent::Sent { .. } => {} + IdentifyEvent::Error { .. } => {} + IdentifyEvent::Pushed { .. 
} => {} + } + } +} + +impl Behaviour { + /// Consumes the events list and drives the Lighthouse global NetworkBehaviour. + fn poll( &mut self, cx: &mut Context, - ) -> Poll, BehaviourEvent>> { - // handle pending disconnections to perform - if let Some((peer_id, reason)) = self.peers_to_dc.pop_front() { - return Poll::Ready(NBAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: BehaviourHandlerIn::Shutdown( - reason.map(|reason| (RequestId::Behaviour, OutboundRequest::Goodbye(reason))), - ), - }); + _: &mut impl PollParameters, + ) -> Poll>> { + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // Handle internal events first + if let Some(event) = self.internal_events.pop_front() { + match event { + InternalBehaviourMessage::DialPeer(peer_id) => { + return Poll::Ready(NBAction::DialPeer { + peer_id, + condition: DialPeerCondition::Disconnected, + }); + } + InternalBehaviourMessage::SocketUpdated(address) => { + return Poll::Ready(NBAction::ReportObservedAddr { + address, + score: AddressScore::Finite(1), + }); + } + } } // check the peer manager for events loop { match self.peer_manager.poll_next_unpin(cx) { Poll::Ready(Some(event)) => match event { - PeerManagerEvent::Dial(peer_id) => { - return Poll::Ready(NBAction::DialPeer { + PeerManagerEvent::PeerConnectedIncoming(peer_id) => { + return Poll::Ready(NBAction::GenerateEvent( + BehaviourEvent::PeerConnectedIncoming(peer_id), + )); + } + PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { + return Poll::Ready(NBAction::GenerateEvent( + BehaviourEvent::PeerConnectedOutgoing(peer_id), + )); + } + PeerManagerEvent::PeerDisconnected(peer_id) => { + return Poll::Ready(NBAction::GenerateEvent( + BehaviourEvent::PeerDisconnected(peer_id), + )); + } + PeerManagerEvent::Banned(peer_id, associated_ips) => { + self.discovery.ban_peer(&peer_id, associated_ips); + return Poll::Ready(NBAction::GenerateEvent(BehaviourEvent::PeerBanned( peer_id, - condition: libp2p::swarm::DialPeerCondition::Disconnected, - }); + ))); } - PeerManagerEvent::SocketUpdated(address) => { - return Poll::Ready(NBAction::ReportObservedAddr { - address, - score: AddressScore::Finite(1), - }); + PeerManagerEvent::UnBanned(peer_id, associated_ips) => { + self.discovery.unban_peer(&peer_id, associated_ips); + return Poll::Ready(NBAction::GenerateEvent(BehaviourEvent::PeerUnbanned( + peer_id, + ))); } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform @@ -859,6 +1086,10 @@ impl Behaviour { peer_id, ))); } + PeerManagerEvent::DiscoverPeers => { + // Peer manager has requested a discovery query for more peers. 
+ self.discovery.discover_peers(); + } PeerManagerEvent::Ping(peer_id) => { // send a ping request to this peer self.ping(RequestId::Behaviour, peer_id); @@ -867,17 +1098,10 @@ impl Behaviour { self.send_meta_data_request(peer_id); } PeerManagerEvent::DisconnectPeer(peer_id, reason) => { - debug!(self.log, "PeerManager disconnecting peer"; + debug!(self.log, "Peer Manager disconnecting peer"; "peer_id" => %peer_id, "reason" => %reason); // send one goodbye - return Poll::Ready(NBAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: BehaviourHandlerIn::Shutdown(Some(( - RequestId::Behaviour, - OutboundRequest::Goodbye(reason), - ))), - }); + self.eth2_rpc.shutdown(peer_id, reason); } }, Poll::Pending => break, @@ -896,386 +1120,6 @@ impl Behaviour { Poll::Pending } - - fn on_identify_event(&mut self, event: IdentifyEvent) { - match event { - IdentifyEvent::Received { - peer_id, - mut info, - observed_addr, - } => { - if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { - debug!( - self.log, - "More than 10 addresses have been identified, truncating" - ); - info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); - } - // send peer info to the peer manager. - self.peer_manager.identify(&peer_id, &info); - - debug!(self.log, "Identified Peer"; "peer" => %peer_id, - "protocol_version" => info.protocol_version, - "agent_version" => info.agent_version, - "listening_ addresses" => ?info.listen_addrs, - "observed_address" => ?observed_addr, - "protocols" => ?info.protocols - ); - } - IdentifyEvent::Sent { .. } => {} - IdentifyEvent::Error { .. } => {} - } - } - - /// Adds an event to the queue waking the current thread to process it. - fn add_event(&mut self, event: BehaviourEvent) { - self.events.push_back(event); - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } - } - - /// Creates a whitelist topic filter that covers all possible topics using the given set of - /// possible fork digests. - fn create_whitelist_filter( - possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, - sync_committee_subnet_count: u64, - ) -> WhitelistSubscriptionFilter { - let mut possible_hashes = HashSet::new(); - for fork_digest in possible_fork_digests { - let mut add = |kind| { - let topic: Topic = - GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); - possible_hashes.insert(topic.hash()); - }; - - use GossipKind::*; - add(BeaconBlock); - add(BeaconAggregateAndProof); - add(VoluntaryExit); - add(ProposerSlashing); - add(AttesterSlashing); - add(SignedContributionAndProof); - for id in 0..attestation_subnet_count { - add(Attestation(SubnetId::new(id))); - } - for id in 0..sync_committee_subnet_count { - add(SyncCommitteeMessage(SyncSubnetId::new(id))); - } - } - WhitelistSubscriptionFilter(possible_hashes) - } -} - -/// Calls the given function with the given args on all sub behaviours. -macro_rules! delegate_to_behaviours { - ($self: ident, $fn: ident, $($arg: ident), *) => { - $self.gossipsub.$fn($($arg),*); - $self.eth2_rpc.$fn($($arg),*); - $self.identify.$fn($($arg),*); - }; -} - -impl NetworkBehaviour for Behaviour { - type ProtocolsHandler = BehaviourHandler; - type OutEvent = BehaviourEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - BehaviourHandler::new(&mut self.gossipsub, &mut self.eth2_rpc, &mut self.identify) - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - self.peer_manager.addresses_of_peer(peer_id) - } - - // This gets called every time a connection is established. 
- // NOTE: The current logic implies that we would reject extra connections for already connected - // peers if we have reached our peer limit. This is fine for the time being as we currently - // only allow a single connection per peer. - fn inject_connection_established( - &mut self, - peer_id: &PeerId, - conn_id: &ConnectionId, - endpoint: &ConnectedPoint, - ) { - let goodbye_reason: Option = if self.peer_manager.is_banned(peer_id) { - // If the peer is banned, send goodbye with reason banned. - // A peer that has recently transitioned to the banned state should be in the - // disconnecting state, but the `is_banned()` function is dependent on score so should - // be true here in this case. - Some(GoodbyeReason::Banned) - } else if self.peer_manager.peer_limit_reached() - && self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map_or(true, |i| !i.has_future_duty()) - { - // If we are at our peer limit and we don't need the peer for a future validator - // duty, send goodbye with reason TooManyPeers - Some(GoodbyeReason::TooManyPeers) - } else { - None - }; - - if let Some(goodbye_reason) = goodbye_reason { - match goodbye_reason { - GoodbyeReason::Banned => { - debug!(self.log, "Disconnecting newly connected peer"; "peer_id" => %peer_id, "reason" => %goodbye_reason) - } - _ => { - trace!(self.log, "Disconnecting newly connected peer"; "peer_id" => %peer_id, "reason" => %goodbye_reason) - } - } - self.peers_to_dc.push_back((*peer_id, Some(goodbye_reason))); - // NOTE: We don't inform the peer manager that this peer is disconnecting. It is simply - // rejected with a goodbye. - return; - } - - // All peers at this point will be registered as being connected. - // Notify the peer manager of a successful connection - match endpoint { - ConnectedPoint::Listener { send_back_addr, .. } => { - self.peer_manager - .connect_ingoing(&peer_id, send_back_addr.clone()); - self.add_event(BehaviourEvent::PeerConnected(*peer_id)); - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming"); - } - ConnectedPoint::Dialer { address } => { - self.peer_manager - .connect_outgoing(&peer_id, address.clone()); - self.add_event(BehaviourEvent::PeerDialed(*peer_id)); - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Dialed"); - } - } - // report the event to the behaviour - delegate_to_behaviours!( - self, - inject_connection_established, - peer_id, - conn_id, - endpoint - ); - } - - // This gets called on the initial connection establishment. - // NOTE: This gets called after inject_connection_established. Therefore the logic in that - // function dictates the logic here. - fn inject_connected(&mut self, peer_id: &PeerId) { - // If the PeerManager has connected this peer, inform the behaviours - if !self.network_globals.peers.read().is_connected(&peer_id) { - return; - } - - // increment prometheus metrics - metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); - metrics::set_gauge( - &metrics::PEERS_CONNECTED, - self.network_globals.connected_peers() as i64, - ); - - delegate_to_behaviours!(self, inject_connected, peer_id); - } - - // This gets called every time a connection is closed. - // NOTE: The peer manager state can be modified in the lifetime of the peer. Due to the scoring - // mechanism. Peers can become banned. In this case, we still want to inform the behaviours. 
- fn inject_connection_closed( - &mut self, - peer_id: &PeerId, - conn_id: &ConnectionId, - endpoint: &ConnectedPoint, - ) { - // If the peer manager (and therefore the behaviour's) believe this peer connected, inform - // about the disconnection. - // It could be the peer was in the process of being disconnected. In this case the - // sub-behaviours are expecting this peer to be connected and we inform them. - if self - .network_globals - .peers - .read() - .is_connected_or_disconnecting(peer_id) - { - // We are disconnecting the peer or the peer has already been connected. - // Both these cases, the peer has been previously registered in the sub protocols. - delegate_to_behaviours!(self, inject_connection_closed, peer_id, conn_id, endpoint); - } - } - - // This gets called once there are no more active connections. - fn inject_disconnected(&mut self, peer_id: &PeerId) { - // If the application/behaviour layers thinks this peer has connected inform it of the disconnect. - - // Remove all subnet subscriptions from peerdb for the disconnected peer. - self.peer_manager().remove_all_subscriptions(&peer_id); - - if self - .network_globals - .peers - .read() - .is_connected_or_disconnecting(peer_id) - { - // We are disconnecting the peer or the peer has already been connected. - // Both these cases, the peer has been previously registered in the sub protocols and - // potentially the application layer. - // Inform the application. - self.add_event(BehaviourEvent::PeerDisconnected(*peer_id)); - // Inform the behaviour. - delegate_to_behaviours!(self, inject_disconnected, peer_id); - - debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); - - // Decrement the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map(|info| info.client.kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.dec() - }; - } - } - - // Inform the peer manager. - // NOTE: It may be the case that a rejected node, due to too many peers is disconnected - // here and the peer manager has no knowledge of its connection. We insert it here for - // reference so that peer manager can track this peer. - self.peer_manager.notify_disconnect(&peer_id); - - // Update the prometheus metrics - metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); - metrics::set_gauge( - &metrics::PEERS_CONNECTED, - self.network_globals.connected_peers() as i64, - ); - } - - fn inject_addr_reach_failure( - &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error, - ) { - delegate_to_behaviours!(self, inject_addr_reach_failure, peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - // Could not dial the peer, inform the peer manager. 
- self.peer_manager.notify_dial_failure(&peer_id); - delegate_to_behaviours!(self, inject_dial_failure, peer_id); - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - delegate_to_behaviours!(self, inject_new_listen_addr, addr); - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - delegate_to_behaviours!(self, inject_expired_listen_addr, addr); - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - delegate_to_behaviours!(self, inject_new_external_addr, addr); - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { - delegate_to_behaviours!(self, inject_listener_error, id, err); - } - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &std::io::Error>) { - delegate_to_behaviours!(self, inject_listener_closed, id, reason); - } - - fn inject_event( - &mut self, - peer_id: PeerId, - conn_id: ConnectionId, - event: ::OutEvent, - ) { - // If the peer is not supposed to be connected (undergoing active disconnection, - // don't process any of its messages. - if !self.network_globals.peers.read().is_connected(&peer_id) { - return; - } - - // Events comming from the handler, redirected to each behaviour - match event { - DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev), - DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev), - DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev), - } - } - - fn poll( - &mut self, - cx: &mut Context, - poll_params: &mut impl PollParameters, - ) -> Poll::InEvent, Self::OutEvent>> { - // update the waker if needed - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - macro_rules! poll_behaviour { - /* $behaviour: The sub-behaviour being polled. - * $on_event_fn: Function to call if we get an event from the sub-behaviour. - * $notify_handler_event_closure: Closure mapping the received event type to - * the one that the handler should get. 
- */ - ($behaviour: ident, $on_event_fn: ident, $notify_handler_event_closure: expr) => { - loop { - // poll the sub-behaviour - match self.$behaviour.poll(cx, poll_params) { - Poll::Ready(action) => match action { - // call the designated function to handle the event from sub-behaviour - NBAction::GenerateEvent(event) => self.$on_event_fn(event), - NBAction::DialAddress { address } => { - return Poll::Ready(NBAction::DialAddress { address }) - } - NBAction::DialPeer { peer_id, condition } => { - return Poll::Ready(NBAction::DialPeer { peer_id, condition }) - } - NBAction::NotifyHandler { - peer_id, - handler, - event, - } => { - return Poll::Ready(NBAction::NotifyHandler { - peer_id, - handler, - // call the closure mapping the received event to the needed one - // in order to notify the handler - event: BehaviourHandlerIn::Delegate( - $notify_handler_event_closure(event), - ), - }); - } - NBAction::ReportObservedAddr { address, score } => { - return Poll::Ready(NBAction::ReportObservedAddr { address, score }) - } - }, - Poll::Pending => break, - } - } - }; - } - - poll_behaviour!(gossipsub, on_gossip_event, DelegateIn::Gossipsub); - poll_behaviour!(eth2_rpc, on_rpc_event, DelegateIn::RPC); - poll_behaviour!(identify, on_identify_event, DelegateIn::Identify); - - self.custom_poll(cx) - } } /* Public API types */ diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index 98349294832..26e26ede054 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -16,13 +16,15 @@ use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; +/// The maximum transmit size of gossip messages in bytes. pub const GOSSIP_MAX_SIZE: usize = 1_048_576; +/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh. +pub const MESH_N_LOW: usize = 6; // We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the // specification. We leave it here for posterity. // const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0]; const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0]; -pub const MESH_N_LOW: usize = 6; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] @@ -115,10 +117,20 @@ impl Default for Config { .build() .expect("valid gossipsub configuration"); + // Discv5 Unsolicited Packet Rate Limiter + let filter_rate_limiter = Some( + discv5::RateLimiterBuilder::new() + .total_n_every(10, Duration::from_secs(1)) // Allow bursts, average 10 per second + .ip_n_every(9, Duration::from_secs(1)) // Allow bursts, average 9 per second + .node_n_every(8, Duration::from_secs(1)) // Allow bursts, average 8 per second + .build() + .expect("The total rate limit has been specified"), + ); + // discv5 configuration let discv5_config = Discv5ConfigBuilder::new() .enable_packet_filter() - .session_cache_capacity(1000) + .session_cache_capacity(5000) .request_timeout(Duration::from_secs(1)) .query_peer_timeout(Duration::from_secs(2)) .query_timeout(Duration::from_secs(30)) @@ -127,6 +139,11 @@ impl Default for Config { .query_parallelism(5) .disable_report_discovered_peers() .ip_limit() // limits /24 IP's in buckets. 
+ .incoming_bucket_limit(8) // half the bucket size + .filter_rate_limiter(filter_rate_limiter) + .filter_max_bans_per_ip(Some(5)) + .filter_max_nodes_per_ip(Some(10)) + .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); @@ -205,8 +222,8 @@ pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { .mesh_n_high(12) .gossip_lazy(6) .fanout_ttl(Duration::from_secs(60)) - .history_length(6) - .max_messages_per_rpc(Some(10)) + .history_length(12) + .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .history_gossip(3) .validate_messages() // require validation before propagation .validation_mode(ValidationMode::Anonymous) diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index 404184f00e2..16b87c4fdba 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -1,8 +1,15 @@ -///! This manages the discovery and management of peers. +//! The discovery sub-behaviour of Lighthouse. +//! +//! This module creates a libp2p dummy-behaviour built around the discv5 protocol. It handles +//! queries and manages access to the discovery routing table. + pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder +use crate::{config, metrics}; +use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; +use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr, @@ -10,13 +17,16 @@ pub use enr::{ pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; pub use libp2p::core::identity::{Keypair, PublicKey}; -use crate::{config, metrics}; -use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; -use discv5::{enr::NodeId, Discv5, Discv5Event}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; -use libp2p::core::PeerId; +pub use libp2p::{ + core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, + swarm::{ + protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction as NBAction, + NotifyHandler, PollParameters, SubstreamProtocol, + }, +}; use lru::LruCache; use slog::{crit, debug, error, info, trace, warn}; use ssz::Encode; @@ -308,6 +318,11 @@ impl Discovery { self.cached_enrs.iter() } + /// Removes a cached ENR from the list. + pub fn remove_cached_enr(&mut self, peer_id: &PeerId) -> Option { + self.cached_enrs.pop(peer_id) + } + /// This adds a new `FindPeers` query to the queue if one doesn't already exist. pub fn discover_peers(&mut self) { // If the discv5 service isn't running or we are in the process of a query, don't bother queuing a new one. @@ -547,33 +562,38 @@ impl Discovery { // first try and convert the peer_id to a node_id. if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. - self.discv5.ban_node(&node_id); + self.discv5.ban_node(&node_id, None); // Remove the node from the routing table. self.discv5.remove_node(&node_id); } for ip_address in ip_addresses { - self.discv5.ban_ip(ip_address); + self.discv5.ban_ip(ip_address, None); } } + /// Unbans the peer in discovery. pub fn unban_peer(&mut self, peer_id: &PeerId, ip_addresses: Vec) { // first try and convert the peer_id to a node_id. 
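For reference, the discv5 packet-filter pieces added in the config.rs hunk above compose roughly as in the sketch below. The builder method names and the concrete values come from this diff; the wrapping function and the fully qualified paths are illustrative assumptions, not the exact upstream discv5 API.

```rust
use std::time::Duration;

// Hedged sketch: combines the rate limiter and filter options introduced above.
fn example_discv5_config() -> discv5::Discv5Config {
    // Unsolicited packets are limited globally, per IP and per node id; short bursts are
    // allowed as long as the average stays under the configured rate.
    let filter_rate_limiter = Some(
        discv5::RateLimiterBuilder::new()
            .total_n_every(10, Duration::from_secs(1)) // ~10 packets/s in total
            .ip_n_every(9, Duration::from_secs(1))     // ~9 packets/s per IP
            .node_n_every(8, Duration::from_secs(1))   // ~8 packets/s per node id
            .build()
            .expect("The total rate limit has been specified"),
    );

    // Peers that trip the filter are banned at the discovery layer for an hour.
    discv5::Discv5ConfigBuilder::new()
        .enable_packet_filter()
        .filter_rate_limiter(filter_rate_limiter)
        .filter_max_bans_per_ip(Some(5))
        .filter_max_nodes_per_ip(Some(10))
        .ban_duration(Some(Duration::from_secs(3600)))
        .build()
}
```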
if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. - self.discv5.permit_node(&node_id); + self.discv5.ban_node_remove(&node_id); } for ip_address in ip_addresses { - self.discv5.permit_ip(ip_address); + self.discv5.ban_ip_remove(&ip_address); } } - // mark node as disconnected in DHT, freeing up space for other nodes + /// Marks node as disconnected in the DHT, freeing up space for other nodes, this also removes + /// nodes from the cached ENR list. pub fn disconnect_peer(&mut self, peer_id: &PeerId) { if let Ok(node_id) = peer_id_to_node_id(peer_id) { self.discv5.disconnect_node(&node_id); } + // Remove the peer from the cached list, to prevent redialing disconnected + // peers. + self.cached_enrs.pop(peer_id); } /* Internal Functions */ @@ -919,9 +939,68 @@ impl Discovery { } None } +} + +/* NetworkBehaviour Implementation */ + +impl NetworkBehaviour for Discovery { + // Discovery is not a real NetworkBehaviour... + type ProtocolsHandler = libp2p::swarm::protocols_handler::DummyProtocolsHandler; + type OutEvent = DiscoveryEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + libp2p::swarm::protocols_handler::DummyProtocolsHandler::default() + } + + // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + if let Some(enr) = self.enr_of_peer(peer_id) { + // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP + // port is removed, which is assumed to be associated with the discv5 protocol (and + // therefore irrelevant for other libp2p components). + enr.multiaddr_tcp() + } else { + // PeerId is not known + Vec::new() + } + } + + fn inject_connected(&mut self, _peer_id: &PeerId) {} + fn inject_disconnected(&mut self, _peer_id: &PeerId) {} + fn inject_connection_established( + &mut self, + _: &PeerId, + _: &ConnectionId, + _connected_point: &ConnectedPoint, + ) { + } + fn inject_connection_closed( + &mut self, + _: &PeerId, + _: &ConnectionId, + _connected_point: &ConnectedPoint, + ) { + } + fn inject_event( + &mut self, + _: PeerId, + _: ConnectionId, + _: ::OutEvent, + ) { + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + // set peer as disconnected in discovery DHT + debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); + self.disconnect_peer(peer_id); + } - // Main execution loop to be driven by the peer manager. 
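The discv5 ban surface used above now takes an optional ban duration and is undone with the `*_remove` calls. A small, hypothetical helper to make the pairing explicit; the `Option<Duration>` arguments are inferred from the calls in this diff rather than quoted from the discv5 docs.

```rust
use std::net::IpAddr;
use std::time::Duration;

use discv5::{enr::NodeId, Discv5};

// Illustrative only: mirrors the ban/unban calls in `ban_peer` and `unban_peer` above.
fn ban_then_unban(discv5: &mut Discv5, node_id: &NodeId, ip_address: IpAddr) {
    // `None` bans until explicitly removed; `Some(duration)` expires on its own.
    discv5.ban_node(node_id, None);
    discv5.ban_ip(ip_address, Some(Duration::from_secs(3600)));

    // Lifting the bans mirrors the `*_remove` calls used in `unban_peer`.
    discv5.ban_node_remove(node_id);
    discv5.ban_ip_remove(&ip_address);
}
```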
- pub fn poll(&mut self, cx: &mut Context) -> Poll { + // Main execution loop to drive the behaviour + fn poll( + &mut self, + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll::InEvent, Self::OutEvent>> { if !self.started { return Poll::Pending; } @@ -932,7 +1011,9 @@ impl Discovery { // Drive the queries and return any results from completed queries if let Some(results) = self.poll_queries(cx) { // return the result to the peer manager - return Poll::Ready(DiscoveryEvent::QueryResult(results)); + return Poll::Ready(NBAction::GenerateEvent(DiscoveryEvent::QueryResult( + results, + ))); } // Process the server event stream @@ -980,9 +1061,13 @@ impl Discovery { enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log); // update network globals *self.network_globals.local_enr.write() = enr; - return Poll::Ready(DiscoveryEvent::SocketUpdated(socket)); + return Poll::Ready(NBAction::GenerateEvent( + DiscoveryEvent::SocketUpdated(socket), + )); } - _ => {} // Ignore all other discv5 server events + Discv5Event::EnrAdded { .. } + | Discv5Event::TalkRequest(_) + | Discv5Event::NodeInserted { .. } => {} // Ignore all other discv5 server events } } } diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index c3c378b517b..b73e8a20dca 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -1,29 +1,26 @@ -//! Implementation of a Lighthouse's peer management system. +//! Implementation of Lighthouse's peer management system. pub use self::peerdb::*; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::types::SyncState; -use crate::{ - discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}, - Subnet, -}; +use crate::Subnet; use crate::{error, metrics, Gossipsub}; -use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId, SubnetDiscovery}; +use crate::{NetworkConfig, NetworkGlobals, PeerId}; +use discv5::Enr; use futures::prelude::*; use futures::Stream; use hashset_delay::HashSetDelay; -use libp2p::core::multiaddr::Protocol as MProtocol; +use libp2p::core::ConnectedPoint; use libp2p::identify::IdentifyInfo; -use slog::{crit, debug, error, trace, warn}; +use slog::{crit, debug, error, warn}; use smallvec::SmallVec; use std::{ - net::SocketAddr, pin::Pin, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, }; -use types::{EthSpec, SyncSubnetId}; +use types::EthSpec; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -38,7 +35,8 @@ pub use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerConnectionSta pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; use score::{PeerAction, ReportSource, ScoreState}; use std::cmp::Ordering; -use std::collections::{hash_map::Entry, HashMap}; +use std::collections::HashMap; +use std::net::IpAddr; /// The time in seconds between re-status's peers. const STATUS_INTERVAL: u64 = 300; @@ -57,15 +55,14 @@ const HEARTBEAT_INTERVAL: u64 = 30; /// A fraction of `PeerManager::target_peers` that we allow to connect to us in excess of /// `PeerManager::target_peers`. For clarity, if `PeerManager::target_peers` is 50 and /// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e 55. -const PEER_EXCESS_FACTOR: f32 = 0.1; +pub const PEER_EXCESS_FACTOR: f32 = 0.1; +/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections. 
+pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1; /// Relative factor of peers that are allowed to have a negative gossipsub score without penalizing /// them in lighthouse. const ALLOWED_NEGATIVE_GOSSIPSUB_FACTOR: f32 = 0.1; -/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections. -const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1; - /// The main struct that handles peer's reputation and connection status. pub struct PeerManager { /// Storage of network globals to access the `PeerDB`. @@ -82,25 +79,22 @@ pub struct PeerManager { target_peers: usize, /// The maximum number of peers we allow (exceptions for subnet peers) max_peers: usize, - /// A collection of sync committee subnets that we need to stay subscribed to. - /// Sync committee subnets are longer term (256 epochs). Hence, we need to re-run - /// discovery queries for subnet peers if we disconnect from existing sync - /// committee subnet peers. - sync_committee_subnets: HashMap, - /// The discovery service. - discovery: Discovery, /// The heartbeat interval to perform routine maintenance. heartbeat: tokio::time::Interval, + /// Keeps track of whether the discovery service is enabled or not. + discovery_enabled: bool, /// The logger associated with the `PeerManager`. log: slog::Logger, } /// The events that the `PeerManager` outputs (requests). pub enum PeerManagerEvent { - /// Dial a PeerId. - Dial(PeerId), - /// Inform libp2p that our external socket addr has been updated. - SocketUpdated(Multiaddr), + /// A peer has dialed us. + PeerConnectedIncoming(PeerId), + /// A peer has been dialed. + PeerConnectedOutgoing(PeerId), + /// A peer has disconnected. + PeerDisconnected(PeerId), /// Sends a STATUS to a peer. Status(PeerId), /// Sends a PING to a peer. @@ -109,22 +103,22 @@ pub enum PeerManagerEvent { MetaData(PeerId), /// The peer should be disconnected. DisconnectPeer(PeerId, GoodbyeReason), + /// Inform the behaviour to ban this peer and associated ip addresses. + Banned(PeerId, Vec), + /// The peer should be unbanned with the associated ip addresses. + UnBanned(PeerId, Vec), + /// Request the behaviour to discover more peers. + DiscoverPeers, } impl PeerManager { // NOTE: Must be run inside a tokio executor. pub async fn new( - local_key: &Keypair, config: &NetworkConfig, network_globals: Arc>, log: &slog::Logger, ) -> error::Result { - // start the discovery service - let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?; - - // start searching for peers - discovery.discover_peers(); - + // Set up the peer manager heartbeat interval let heartbeat = tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL)); Ok(PeerManager { @@ -135,23 +129,14 @@ impl PeerManager { status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)), target_peers: config.target_peers, max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize, - sync_committee_subnets: HashMap::default(), - discovery, heartbeat, + discovery_enabled: !config.disable_discovery, log: log.clone(), }) } /* Public accessible functions */ - /// Attempts to connect to a peer. - /// - /// Returns true if the peer was accepted into the database. - pub fn dial_peer(&mut self, peer_id: &PeerId) -> bool { - self.events.push(PeerManagerEvent::Dial(*peer_id)); - self.connect_peer(peer_id, ConnectingType::Dialing) - } - /// The application layer wants to disconnect from a peer for a particular reason. 
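With discovery and dialing moved out of the peer manager, everything it wants done is now expressed through `PeerManagerEvent`. A hypothetical consumer in the behaviour would match the variants above roughly as follows; the handler bodies are placeholders, not code from this change.

```rust
// Sketch of a consumer of the reworked PeerManagerEvent (variants as listed above).
fn handle_peer_manager_event(event: PeerManagerEvent) {
    match event {
        PeerManagerEvent::PeerConnectedIncoming(peer_id)
        | PeerManagerEvent::PeerConnectedOutgoing(peer_id) => {
            // surface the new connection to the application layer
            let _ = peer_id;
        }
        PeerManagerEvent::PeerDisconnected(_peer_id) => { /* drop per-peer state */ }
        PeerManagerEvent::Status(_peer_id) => { /* send a STATUS request */ }
        PeerManagerEvent::Ping(_peer_id) => { /* send a PING */ }
        PeerManagerEvent::MetaData(_peer_id) => { /* request METADATA */ }
        PeerManagerEvent::DisconnectPeer(_peer_id, _reason) => { /* goodbye, then disconnect */ }
        PeerManagerEvent::Banned(_peer_id, _ips) => { /* ban at the swarm and discovery layers */ }
        PeerManagerEvent::UnBanned(_peer_id, _ips) => { /* lift the ban */ }
        PeerManagerEvent::DiscoverPeers => { /* queue a discovery query */ }
    }
}
```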
/// /// All instant disconnections are fatal and we ban the associated peer. @@ -226,81 +211,52 @@ impl PeerManager { self.ban_and_unban_peers(to_ban_peers, to_unban_peers); } - /* Discovery Requests */ - - /// Provides a reference to the underlying discovery service. - pub fn discovery(&self) -> &Discovery { - &self.discovery - } - - /// Provides a mutable reference to the underlying discovery service. - pub fn discovery_mut(&mut self) -> &mut Discovery { - &mut self.discovery - } - - /// A request to find peers on a given subnet. - pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { - // If discovery is not started or disabled, ignore the request - if !self.discovery.started { - return; - } + /// Peers that have been returned by discovery requests that are suitable for dialing are + /// returned here. + /// + /// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated + /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the + /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup + /// proves resource constraining, we should switch to multiaddr dialling here. + #[allow(clippy::mutable_key_type)] + pub fn peers_discovered(&mut self, results: HashMap>) -> Vec { + let mut to_dial_peers = Vec::new(); - let filtered: Vec = subnets_to_discover - .into_iter() - .filter(|s| { - if let Some(min_ttl) = s.min_ttl { - // Extend min_ttl of connected peers on required subnets + let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); + for (peer_id, min_ttl) in results { + // we attempt a connection if this peer is a subnet peer or if the max peer count + // is not yet filled (including dialing peers) + if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers) + && self.network_globals.peers.read().should_dial(&peer_id) + { + // This should be updated with the peer dialing. In fact created once the peer is + // dialed + if let Some(min_ttl) = min_ttl { self.network_globals .peers .write() - .extend_peers_on_subnet(&s.subnet, min_ttl); - - // Insert subnet into list of long lived sync committee subnets if required - if let Subnet::SyncCommittee(subnet_id) = s.subnet { - match self.sync_committee_subnets.entry(subnet_id) { - Entry::Vacant(_) => { - self.sync_committee_subnets.insert(subnet_id, min_ttl); - } - Entry::Occupied(old) => { - if *old.get() < min_ttl { - self.sync_committee_subnets.insert(subnet_id, min_ttl); - } - } - } - } + .update_min_ttl(&peer_id, min_ttl); } + to_dial_peers.push(peer_id); + } + } - // Already have target number of peers, no need for subnet discovery - let peers_on_subnet = self - .network_globals - .peers - .read() - .good_peers_on_subnet(s.subnet) - .count(); - if peers_on_subnet >= TARGET_SUBNET_PEERS { - trace!( - self.log, - "Discovery query ignored"; - "subnet_id" => ?s.subnet, - "reason" => "Already connected to desired peers", - "connected_peers_on_subnet" => peers_on_subnet, - "target_subnet_peers" => TARGET_SUBNET_PEERS, - ); - false - // Queue an outgoing connection request to the cached peers that are on `s.subnet_id`. - // If we connect to the cached peers before the discovery query starts, then we potentially - // save a costly discovery query. 
- } else { - self.dial_cached_enrs_in_subnet(s.subnet); - true - } - }) - .collect(); + // Queue another discovery if we need to + let peer_count = self.network_globals.connected_or_dialing_peers(); + let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); + let min_outbound_only_target = + (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize; - // request the subnet query from discovery - if !filtered.is_empty() { - self.discovery.discover_subnet_peers(filtered); + if self.discovery_enabled + && (peer_count < self.target_peers.saturating_sub(to_dial_peers.len()) + || outbound_only_peer_count < min_outbound_only_target) + { + // We need more peers, re-queue a discovery lookup. + debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); + self.events.push(PeerManagerEvent::DiscoverPeers); } + + to_dial_peers } /// A STATUS message has been received from a peer. This resets the status timer. @@ -331,19 +287,144 @@ impl PeerManager { /* Notifications from the Swarm */ - /// Updates the state of the peer as disconnected. - /// - /// This is also called when dialing a peer fails. - pub fn notify_disconnect(&mut self, peer_id: &PeerId) { - self.network_globals - .peers - .write() - .notify_disconnect(peer_id); + // A peer is being dialed. + pub fn inject_dialing(&mut self, peer_id: &PeerId, enr: Option) { + self.inject_peer_connection(peer_id, ConnectingType::Dialing, enr); + } - // remove the ping and status timer for the peer - self.inbound_ping_peers.remove(peer_id); - self.outbound_ping_peers.remove(peer_id); - self.status_peers.remove(peer_id); + pub fn inject_connection_established( + &mut self, + peer_id: PeerId, + endpoint: ConnectedPoint, + num_established: std::num::NonZeroU32, + enr: Option, + ) { + // Log the connection + match &endpoint { + ConnectedPoint::Listener { .. } => { + debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming", "connections" => %num_established); + } + ConnectedPoint::Dialer { .. } => { + debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Outgoing", "connections" => %num_established); + } + } + + // Should not be able to connect to a banned peer. Double check here + if self.is_banned(&peer_id) { + warn!(self.log, "Connected to a banned peer"; "peer_id" => %peer_id); + self.events.push(PeerManagerEvent::DisconnectPeer( + peer_id, + GoodbyeReason::Banned, + )); + self.network_globals + .peers + .write() + .notify_disconnecting(peer_id, true); + return; + } + + // Check the connection limits + if self.peer_limit_reached() + && self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map_or(true, |peer| !peer.has_future_duty()) + { + self.events.push(PeerManagerEvent::DisconnectPeer( + peer_id, + GoodbyeReason::TooManyPeers, + )); + self.network_globals + .peers + .write() + .notify_disconnecting(peer_id, false); + return; + } + + // Register the newly connected peer (regardless if we are about to disconnect them). + // NOTE: We don't register peers that we are disconnecting immediately. The network service + // does not need to know about these peers. + match endpoint { + ConnectedPoint::Listener { send_back_addr, .. 
} => { + self.inject_connect_ingoing(&peer_id, send_back_addr, enr); + if num_established == std::num::NonZeroU32::new(1).expect("valid") { + self.events + .push(PeerManagerEvent::PeerConnectedIncoming(peer_id)); + } + } + ConnectedPoint::Dialer { address } => { + self.inject_connect_outgoing(&peer_id, address, enr); + if num_established == std::num::NonZeroU32::new(1).expect("valid") { + self.events + .push(PeerManagerEvent::PeerConnectedOutgoing(peer_id)); + } + } + } + + // increment prometheus metrics + metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); + metrics::set_gauge( + &metrics::PEERS_CONNECTED, + self.network_globals.connected_peers() as i64, + ); + } + + pub fn inject_connection_closed( + &mut self, + peer_id: PeerId, + _endpoint: ConnectedPoint, + num_established: u32, + ) { + if num_established == 0 { + // There are no more connections + + // Remove all subnet subscriptions from the peer_db + self.remove_all_subscriptions(&peer_id); + + if self + .network_globals + .peers + .read() + .is_connected_or_disconnecting(&peer_id) + { + // We are disconnecting the peer or the peer has already been connected. + // Both these cases, the peer has been previously registered by the peer manager and + // potentially the application layer. + // Inform the application. + self.events + .push(PeerManagerEvent::PeerDisconnected(peer_id)); + debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); + + // Decrement the PEERS_PER_CLIENT metric + if let Some(kind) = self + .network_globals + .peers + .read() + .peer_info(&peer_id) + .map(|info| info.client.kind.clone()) + { + if let Some(v) = + metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) + { + v.dec() + }; + } + } + + // NOTE: It may be the case that a rejected node, due to too many peers is disconnected + // here and the peer manager has no knowledge of its connection. We insert it here for + // reference so that peer manager can track this peer. + self.inject_disconnect(&peer_id); + + // Update the prometheus metrics + metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + metrics::set_gauge( + &metrics::PEERS_CONNECTED, + self.network_globals.connected_peers() as i64, + ); + } } /// A dial attempt has failed. @@ -351,27 +432,12 @@ impl PeerManager { /// NOTE: It can be the case that we are dialing a peer and during the dialing process the peer /// connects and the dial attempt later fails. To handle this, we only update the peer_db if /// the peer is not already connected. - pub fn notify_dial_failure(&mut self, peer_id: &PeerId) { + pub fn inject_dial_failure(&mut self, peer_id: &PeerId) { if !self.network_globals.peers.read().is_connected(peer_id) { - self.notify_disconnect(peer_id); - // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); - self.discovery.disconnect_peer(peer_id); + self.inject_disconnect(peer_id); } } - /// Sets a peer as connected as long as their reputation allows it - /// Informs if the peer was accepted - pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool { - self.connect_peer(peer_id, ConnectingType::IngoingConnected { multiaddr }) - } - - /// Sets a peer as connected as long as their reputation allows it - /// Informs if the peer was accepted - pub fn connect_outgoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool { - self.connect_peer(peer_id, ConnectingType::OutgoingConnected { multiaddr }) - } - /// Reports if a peer is banned or not. 
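To make the peer-count arithmetic concrete, here is a worked example using the constants from this diff and an assumed `target_peers` of 50, the value already used in the `PEER_EXCESS_FACTOR` doc comment.

```rust
fn main() {
    // Constants as defined in this diff.
    const PEER_EXCESS_FACTOR: f32 = 0.1;
    const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1;
    let target_peers: usize = 50; // illustrative value

    let max_peers = (target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize;
    let min_outbound_only_target =
        (target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize;

    assert_eq!(max_peers, 55); // accept up to 10% more peers than the target
    assert_eq!(min_outbound_only_target, 5); // require at least 5 outbound-only connections

    // A new discovery query is queued whenever the connected count falls below the target,
    // or the outbound-only count falls below `min_outbound_only_target`.
}
```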
/// /// This is used to determine if we should accept incoming connections. @@ -507,6 +573,7 @@ impl PeerManager { }, }, RPCError::NegotiationTimeout => PeerAction::LowToleranceError, + RPCError::Disconnected => return, // No penalty for a graceful disconnection }; self.report_peer(peer_id, peer_action, ReportSource::RPC); @@ -598,22 +665,6 @@ impl PeerManager { } } - // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. - pub fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - if let Some(enr) = self.discovery.enr_of_peer(peer_id) { - // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP - // port is removed, which is assumed to be associated with the discv5 protocol (and - // therefore irrelevant for other libp2p components). - let mut out_list = enr.multiaddr(); - out_list.retain(|addr| !addr.iter().any(|v| matches!(v, MProtocol::Udp(_)))); - - out_list - } else { - // PeerId is not known - Vec::new() - } - } - pub(crate) fn update_gossipsub_scores(&mut self, gossipsub: &Gossipsub) { let mut to_ban_peers = Vec::new(); let mut to_unban_peers = Vec::new(); @@ -669,71 +720,49 @@ impl PeerManager { /* Internal functions */ - // The underlying discovery server has updated our external IP address. We send this up to - // notify libp2p. - fn socket_updated(&mut self, socket: SocketAddr) { - // Build a multiaddr to report to libp2p - let mut multiaddr = Multiaddr::from(socket.ip()); - // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling - // should handle this. - multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp())); - self.events.push(PeerManagerEvent::SocketUpdated(multiaddr)); + /// Sets a peer as connected as long as their reputation allows it + /// Informs if the peer was accepted + fn inject_connect_ingoing( + &mut self, + peer_id: &PeerId, + multiaddr: Multiaddr, + enr: Option, + ) -> bool { + self.inject_peer_connection(peer_id, ConnectingType::IngoingConnected { multiaddr }, enr) } - /// Dial cached enrs in discovery service that are in the given `Subnet` and aren't - /// in Connected, Dialing or Banned state. - fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { - let predicate = subnet_predicate::(vec![subnet], &self.log); - let peers_to_dial: Vec = self - .discovery() - .cached_enrs() - .filter_map(|(peer_id, enr)| { - let peers = self.network_globals.peers.read(); - if predicate(enr) && peers.should_dial(peer_id) { - Some(*peer_id) - } else { - None - } - }) - .collect(); - for peer_id in &peers_to_dial { - debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); - self.dial_peer(peer_id); - } + /// Sets a peer as connected as long as their reputation allows it + /// Informs if the peer was accepted + fn inject_connect_outgoing( + &mut self, + peer_id: &PeerId, + multiaddr: Multiaddr, + enr: Option, + ) -> bool { + self.inject_peer_connection( + peer_id, + ConnectingType::OutgoingConnected { multiaddr }, + enr, + ) } - /// Peers that have been returned by discovery requests are dialed here if they are suitable. + /// Updates the state of the peer as disconnected. /// - /// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated - /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the - /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup - /// proves resource constraining, we should switch to multiaddr dialling here. 
- #[allow(clippy::mutable_key_type)] - fn peers_discovered(&mut self, results: HashMap>) { - let mut to_dial_peers = Vec::new(); - - let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); - for (peer_id, min_ttl) in results { - // we attempt a connection if this peer is a subnet peer or if the max peer count - // is not yet filled (including dialing peers) - if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers) - && self.network_globals.peers.read().should_dial(&peer_id) - { - // This should be updated with the peer dialing. In fact created once the peer is - // dialed - if let Some(min_ttl) = min_ttl { - self.network_globals - .peers - .write() - .update_min_ttl(&peer_id, min_ttl); - } - to_dial_peers.push(peer_id); - } - } - for peer_id in to_dial_peers { - debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); - self.dial_peer(&peer_id); + /// This is also called when dialing a peer fails. + fn inject_disconnect(&mut self, peer_id: &PeerId) { + if self + .network_globals + .peers + .write() + .inject_disconnect(peer_id) + { + self.ban_peer(peer_id); } + + // remove the ping and status timer for the peer + self.inbound_ping_peers.remove(peer_id); + self.outbound_ping_peers.remove(peer_id); + self.status_peers.remove(peer_id); } /// Registers a peer as connected. The `ingoing` parameter determines if the peer is being @@ -742,7 +771,12 @@ impl PeerManager { /// This is called by `connect_ingoing` and `connect_outgoing`. /// /// Informs if the peer was accepted in to the db or not. - fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool { + fn inject_peer_connection( + &mut self, + peer_id: &PeerId, + connection: ConnectingType, + enr: Option, + ) -> bool { { let mut peerdb = self.network_globals.peers.write(); if peerdb.is_banned(&peer_id) { @@ -750,8 +784,6 @@ impl PeerManager { slog::crit!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); } - let enr = self.discovery.enr_of_peer(peer_id); - match connection { ConnectingType::Dialing => { peerdb.dialing_peer(peer_id, enr); @@ -798,6 +830,8 @@ impl PeerManager { true } + /// This handles score transitions between states. It transitions peers states from + /// disconnected/banned/connected. fn handle_score_transitions( previous_state: ScoreState, peer_id: &PeerId, @@ -838,6 +872,7 @@ impl PeerManager { } } + /// Updates the state of banned peers. fn ban_and_unban_peers(&mut self, to_ban_peers: Vec, to_unban_peers: Vec) { // process banning peers for peer_id in to_ban_peers { @@ -907,7 +942,9 @@ impl PeerManager { }) .unwrap_or_default(); - self.discovery.ban_peer(&peer_id, banned_ip_addresses); + // Inform the Swarm to ban the peer + self.events + .push(PeerManagerEvent::Banned(*peer_id, banned_ip_addresses)); } /// Unbans a peer. @@ -923,49 +960,12 @@ impl PeerManager { .map(|info| info.seen_addresses().collect::>()) .unwrap_or_default(); - self.discovery.unban_peer(&peer_id, seen_ip_addresses); + // Inform the Swarm to unban the peer + self.events + .push(PeerManagerEvent::UnBanned(*peer_id, seen_ip_addresses)); Ok(()) } - /// Run discovery query for additional sync committee peers if we fall below `TARGET_PEERS`. 
- fn maintain_sync_committee_peers(&mut self) { - // Remove expired entries - self.sync_committee_subnets - .retain(|_, v| *v > Instant::now()); - - let subnets_to_discover: Vec = self - .sync_committee_subnets - .iter() - .filter_map(|(k, v)| { - if self - .network_globals - .peers - .read() - .good_peers_on_subnet(Subnet::SyncCommittee(*k)) - .count() - < TARGET_SUBNET_PEERS - { - Some(SubnetDiscovery { - subnet: Subnet::SyncCommittee(*k), - min_ttl: Some(*v), - }) - } else { - None - } - }) - .collect(); - - // request the subnet query from discovery - if !subnets_to_discover.is_empty() { - debug!( - self.log, - "Making subnet queries for maintaining sync committee peers"; - "subnets" => ?subnets_to_discover.iter().map(|s| s.subnet).collect::>() - ); - self.discovery.discover_subnet_peers(subnets_to_discover); - } - } - /// The Peer manager's heartbeat maintains the peer count and maintains peer reputations. /// /// It will request discovery queries if the peer count has not reached the desired number of @@ -978,20 +978,18 @@ impl PeerManager { let min_outbound_only_target = (self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize; - if peer_count < self.target_peers || outbound_only_peer_count < min_outbound_only_target { + if self.discovery_enabled + && (peer_count < self.target_peers + || outbound_only_peer_count < min_outbound_only_target) + { // If we need more peers, queue a discovery lookup. - if self.discovery.started { - debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); - self.discovery.discover_peers(); - } + debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers); + self.events.push(PeerManagerEvent::DiscoverPeers); } // Updates peer's scores. self.update_peer_scores(); - // Maintain minimum count for sync committee peers. 
- self.maintain_sync_committee_peers(); - // Keep a list of peers we are disconnecting let mut disconnecting_peers = Vec::new(); @@ -1025,7 +1023,7 @@ impl PeerManager { let mut peer_db = self.network_globals.peers.write(); for peer_id in disconnecting_peers { - peer_db.notify_disconnecting(&peer_id); + peer_db.notify_disconnecting(peer_id, false); self.events.push(PeerManagerEvent::DisconnectPeer( peer_id, GoodbyeReason::TooManyPeers, @@ -1043,14 +1041,6 @@ impl Stream for PeerManager { self.heartbeat(); } - // handle any discovery events - while let Poll::Ready(event) = self.discovery.poll(cx) { - match event { - DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr), - DiscoveryEvent::QueryResult(results) => self.peers_discovered(results), - } - } - // poll the timeouts for pings and status' loop { match self.inbound_ping_peers.poll_next_unpin(cx) { @@ -1175,7 +1165,7 @@ mod tests { vec![], &log, ); - PeerManager::new(&keypair, &config, Arc::new(globals), &log) + PeerManager::new(&config, Arc::new(globals), &log) .await .unwrap() } @@ -1192,11 +1182,19 @@ mod tests { let outbound_only_peer1 = PeerId::random(); let outbound_only_peer2 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer2, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + peer_manager.inject_connect_outgoing( + &outbound_only_peer2, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); // Set the outbound-only peers to have the lowest score. peer_manager @@ -1248,13 +1246,17 @@ mod tests { // Connect to 20 ingoing-only peers. for _i in 0..19 { let peer = PeerId::random(); - peer_manager.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None); } // Connect an outbound-only peer. // Give it the lowest score so that it is evaluated first in the disconnect list iterator. let outbound_only_peer = PeerId::random(); - peer_manager.connect_ingoing(&outbound_only_peer, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &outbound_only_peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers @@ -1280,12 +1282,20 @@ mod tests { let inbound_only_peer1 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_outgoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); // Connect to two peers that are on the threshold of being disconnected. 
- peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &inbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers @@ -1335,12 +1345,20 @@ mod tests { let inbound_only_peer1 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); // Connect to two peers that are on the threshold of being disconnected. - peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &inbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers @@ -1387,12 +1405,20 @@ mod tests { let inbound_only_peer1 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); - peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap()); - peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_outgoing( + &outbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); // Have one peer be on the verge of disconnection. - peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap()); + peer_manager.inject_connect_ingoing( + &inbound_only_peer1, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); peer_manager .network_globals .peers diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs index 211dad99949..0f0ad0df0f0 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peer_info.rs @@ -207,25 +207,16 @@ impl PeerInfo { // Setters /// Modifies the status to Disconnected and sets the last seen instant to now. Returns None if - /// no changes were made. Returns Some(bool) where the bool represents if peer became banned or - /// simply just disconnected. + /// no changes were made. Returns Some(bool) where the bool represents if peer is to now be + /// baned pub fn notify_disconnect(&mut self) -> Option { match self.connection_status { Banned { .. } | Disconnected { .. } => None, Disconnecting { to_ban } => { - // If we are disconnecting this peer in the process of banning, we now ban the - // peer. 
-                if to_ban {
-                    self.connection_status = Banned {
-                        since: Instant::now(),
-                    };
-                    Some(true)
-                } else {
-                    self.connection_status = Disconnected {
-                        since: Instant::now(),
-                    };
-                    Some(false)
-                }
+                self.connection_status = Disconnected {
+                    since: Instant::now(),
+                };
+                Some(to_ban)
             }
             Connected { .. } | Dialing { .. } | Unknown => {
                 self.connection_status = Disconnected {
@@ -236,11 +227,8 @@ impl PeerInfo {
         }
     }

-    /// Notify the we are currently disconnecting this peer, after which the peer will be
-    /// considered banned.
-    // This intermediate state is required to inform the network behaviours that the sub-protocols
-    // are aware this peer exists and it is in the process of being banned. Compared to nodes that
-    // try to connect to us and are already banned (sub protocols do not know of these peers).
+    /// Notify that we are currently disconnecting this peer. Optionally ban the peer after the
+    /// disconnect.
     pub fn disconnecting(&mut self, to_ban: bool) {
         self.connection_status = Disconnecting { to_ban }
     }
diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs
index 95c237521e7..19089fe2dcd 100644
--- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs
+++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs
@@ -456,29 +456,33 @@ impl PeerDB {
         self.connect(peer_id, multiaddr, enr, ConnectionDirection::Outgoing)
     }

-    /// Sets the peer as disconnected. A banned peer remains banned
-    pub fn notify_disconnect(&mut self, peer_id: &PeerId) {
+    /// Sets the peer as disconnected. A banned peer remains banned. If the node has become banned,
+    /// this returns true, otherwise this is false.
+    pub fn inject_disconnect(&mut self, peer_id: &PeerId) -> bool {
         // Note that it could be the case we prevent new nodes from joining. In this instance,
         // we don't bother tracking the new node.
         if let Some(info) = self.peers.get_mut(peer_id) {
-            if let Some(became_banned) = info.notify_disconnect() {
-                if became_banned {
-                    self.banned_peers_count
-                        .add_banned_peer(info.seen_addresses());
-                } else {
-                    self.disconnected_peers += 1;
-                }
+            if !matches!(
+                info.connection_status(),
+                PeerConnectionStatus::Disconnected { .. } | PeerConnectionStatus::Banned { .. }
+            ) {
+                self.disconnected_peers += 1;
             }
+            let result = info.notify_disconnect().unwrap_or(false);
             self.shrink_to_fit();
+            result
+        } else {
+            false
         }
     }

-    /// Notifies the peer manager that the peer is undergoing a normal disconnect (without banning
-    /// afterwards.
-    pub fn notify_disconnecting(&mut self, peer_id: &PeerId) {
-        if let Some(info) = self.peers.get_mut(peer_id) {
-            info.disconnecting(false);
-        }
+    /// Notifies the peer manager that the peer is undergoing a normal disconnect. Optionally tag
+    /// the peer to be banned after the disconnect.
+    pub fn notify_disconnecting(&mut self, peer_id: PeerId, to_ban_afterwards: bool) {
+        self.peers
+            .entry(peer_id)
+            .or_default()
+            .disconnecting(to_ban_afterwards);
     }

     /// Marks a peer to be disconnected and then banned.
@@ -508,15 +512,17 @@ impl PeerDB {
             PeerConnectionStatus::Disconnected { .. } => {
                 // It is possible to ban a peer that has a disconnected score, if there are many
                 // events that score it poorly and are processed after it has disconnected.
-                debug!(log_ref, "Banning a disconnected peer"; "peer_id" => %peer_id);
                 self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
                 info.ban();
                 self.banned_peers_count
                     .add_banned_peer(info.seen_addresses());
+                self.shrink_to_fit();
                 false
             }
             PeerConnectionStatus::Disconnecting { ..
} => { - warn!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id); + // NOTE: This can occur due a rapid downscore of a peer. It goes through the + // disconnection phase and straight into banning in a short time-frame. + debug!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id); info.disconnecting(true); false } @@ -535,6 +541,7 @@ impl PeerDB { self.banned_peers_count .add_banned_peer(info.seen_addresses()); info.ban(); + self.shrink_to_fit(); false } } @@ -729,7 +736,7 @@ mod tests { assert_eq!(pdb.disconnected_peers, 0); for p in pdb.connected_peer_ids().cloned().collect::>() { - pdb.notify_disconnect(&p); + pdb.inject_disconnect(&p); } assert_eq!(pdb.disconnected_peers, MAX_DC_PEERS); @@ -747,7 +754,8 @@ mod tests { for p in pdb.connected_peer_ids().cloned().collect::>() { pdb.disconnect_and_ban(&p); - pdb.notify_disconnect(&p); + pdb.inject_disconnect(&p); + pdb.disconnect_and_ban(&p); } assert_eq!(pdb.banned_peers_count.banned_peers(), MAX_BANNED_PEERS); @@ -807,23 +815,24 @@ mod tests { pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.connect_outgoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.disconnect_and_ban(&random_peer); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); + pdb.disconnect_and_ban(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); } @@ -838,6 +847,10 @@ mod tests { let random_peer1 = PeerId::random(); let random_peer2 = PeerId::random(); let random_peer3 = PeerId::random(); + println!("{}", random_peer); + println!("{}", random_peer1); + println!("{}", random_peer2); + println!("{}", random_peer3); pdb.connect_ingoing(&random_peer, multiaddr.clone(), None); pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None); @@ -849,10 +862,17 @@ mod tests { pdb.banned_peers().count() ); + println!("1:{}", pdb.disconnected_peers); + pdb.connect_ingoing(&random_peer, multiaddr.clone(), None); - pdb.notify_disconnect(&random_peer1); + pdb.inject_disconnect(&random_peer1); + println!("2:{}", pdb.disconnected_peers); + pdb.disconnect_and_ban(&random_peer2); + println!("3:{}", pdb.disconnected_peers); + pdb.inject_disconnect(&random_peer2); + println!("4:{}", pdb.disconnected_peers); pdb.disconnect_and_ban(&random_peer2); - pdb.notify_disconnect(&random_peer2); + println!("5:{}", pdb.disconnected_peers); pdb.connect_ingoing(&random_peer3, multiaddr.clone(), None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( @@ -860,7 +880,16 @@ mod tests { pdb.banned_peers().count() ); pdb.disconnect_and_ban(&random_peer1); - 
pdb.notify_disconnect(&random_peer1); + println!("6:{}", pdb.disconnected_peers); + pdb.inject_disconnect(&random_peer1); + println!("7:{}", pdb.disconnected_peers); + pdb.disconnect_and_ban(&random_peer1); + println!("8:{}", pdb.disconnected_peers); + println!( + "{}, {:?}", + pdb.disconnected_peers, + pdb.disconnected_peers().collect::>() + ); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -874,7 +903,8 @@ mod tests { pdb.banned_peers().count() ); pdb.disconnect_and_ban(&random_peer3); - pdb.notify_disconnect(&random_peer3); + pdb.inject_disconnect(&random_peer3); + pdb.disconnect_and_ban(&random_peer3); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), @@ -882,32 +912,34 @@ mod tests { ); pdb.disconnect_and_ban(&random_peer3); - pdb.notify_disconnect(&random_peer3); + pdb.inject_disconnect(&random_peer3); + pdb.disconnect_and_ban(&random_peer3); pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None); - pdb.notify_disconnect(&random_peer2); + pdb.inject_disconnect(&random_peer2); + pdb.disconnect_and_ban(&random_peer3); + pdb.inject_disconnect(&random_peer3); pdb.disconnect_and_ban(&random_peer3); - pdb.notify_disconnect(&random_peer3); pdb.connect_ingoing(&random_peer, multiaddr, None); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); assert_eq!( pdb.banned_peers_count.banned_peers(), pdb.banned_peers().count() ); pdb.disconnect_and_ban(&random_peer); - pdb.notify_disconnect(&random_peer); + pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); } @@ -953,7 +985,8 @@ mod tests { for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } //check that ip1 and ip2 are banned but ip3-5 not @@ -965,7 +998,8 @@ mod tests { //ban also the last peer in peers pdb.disconnect_and_ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); - pdb.notify_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); + pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); + pdb.disconnect_and_ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); //check that ip1-ip4 are banned but ip5 not assert!(pdb.is_banned(&p1)); @@ -1015,7 +1049,8 @@ mod tests { // ban all peers for p in &peers { pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } // check ip is banned @@ -1036,7 +1071,8 @@ mod tests { for p in &peers { pdb.connect_ingoing(&p, socker_addr.clone(), None); pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } // both IP's are now banned @@ -1052,7 +1088,8 @@ mod tests { // reban every peer except one for p in &peers[1..] 
{ pdb.disconnect_and_ban(p); - pdb.notify_disconnect(p); + pdb.inject_disconnect(p); + pdb.disconnect_and_ban(p); } // nothing is banned @@ -1061,7 +1098,8 @@ mod tests { //reban last peer pdb.disconnect_and_ban(&peers[0]); - pdb.notify_disconnect(&peers[0]); + pdb.inject_disconnect(&peers[0]); + pdb.disconnect_and_ban(&peers[0]); //Ip's are banned again assert!(pdb.is_banned(&p1)); diff --git a/beacon_node/eth2_libp2p/src/peer_manager/score.rs b/beacon_node/eth2_libp2p/src/peer_manager/score.rs index 02479bef067..8b20192296d 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/score.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/score.rs @@ -5,7 +5,7 @@ //! As the logic develops this documentation will advance. //! //! The scoring algorithms are currently experimental. -use crate::behaviour::GOSSIPSUB_GREYLIST_THRESHOLD; +use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; use std::time::Instant; use strum::AsRefStr; @@ -31,7 +31,7 @@ const MIN_SCORE: f64 = -100.0; /// The halflife of a peer's score. I.e the number of seconds it takes for the score to decay to half its value. const SCORE_HALFLIFE: f64 = 600.0; /// The number of seconds we ban a peer for before their score begins to decay. -const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(1800); +const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(12 * 3600); // 12 hours /// We weight negative gossipsub scores in such a way that they never result in a disconnect by /// themselves. This "solves" the problem of non-decaying gossipsub scores for disconnected peers. diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs index 2f34c80d438..f0b6accd2d9 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs @@ -415,16 +415,16 @@ fn handle_length( bytes: &mut BytesMut, ) -> Result, RPCError> { if let Some(length) = len { - return Ok(Some(*length)); + Ok(Some(*length)) } else { // Decode the length of the uncompressed bytes from an unsigned varint // Note: length-prefix of > 10 bytes(uint64) would be a decoding error match uvi_codec.decode(bytes).map_err(RPCError::from)? { Some(length) => { *len = Some(length as usize); - return Ok(Some(length)); + Ok(Some(length)) } - None => return Ok(None), // need more bytes to decode length + None => Ok(None), // need more bytes to decode length } } } diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index 4c6f7896e71..506093ee6cb 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -1,9 +1,11 @@ #![allow(clippy::type_complexity)] #![allow(clippy::cognitive_complexity)] -use super::methods::{RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination}; +use super::methods::{ + GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination, +}; use super::outbound::OutboundRequestContainer; -use super::protocol::{Protocol, RPCError, RPCProtocol}; +use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; @@ -228,13 +230,14 @@ where } } - /// Initiates the handler's shutdown process, sending an optional last message to the peer. 
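The peer-score constants in the score.rs hunk above imply exponential decay: after `t` seconds a score is scaled by `0.5^(t / SCORE_HALFLIFE)`. A minimal sketch of that relationship, derived from the half-life doc comment rather than copied from the implementation:

```rust
// Half-life value as defined in score.rs.
const SCORE_HALFLIFE: f64 = 600.0;

// Exponential decay implied by the half-life: halve the score every SCORE_HALFLIFE seconds.
fn decayed(score: f64, elapsed_secs: f64) -> f64 {
    score * 0.5_f64.powf(elapsed_secs / SCORE_HALFLIFE)
}

fn main() {
    assert!((decayed(-40.0, 600.0) - (-20.0)).abs() < 1e-9); // halved after one half-life
    assert!((decayed(-40.0, 1200.0) - (-10.0)).abs() < 1e-9); // quartered after two
    // Banned peers only begin this decay after BANNED_BEFORE_DECAY, raised above to 12 hours.
}
```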
- pub fn shutdown(&mut self, final_msg: Option<(RequestId, OutboundRequest)>) { + /// Initiates the handler's shutdown process, sending an optional Goodbye message to the + /// peer. + fn shutdown(&mut self, goodbye_reason: Option) { if matches!(self.state, HandlerState::Active) { if !self.dial_queue.is_empty() { debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len()); } - // we now drive to completion communications already dialed/established + // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { self.events_out.push(Err(HandlerErr::Outbound { error: RPCError::HandlerRejected, @@ -243,9 +246,10 @@ where })); } - // Queue our final message, if any - if let Some((id, req)) = final_msg { - self.dial_queue.push((id, req)); + // Queue our goodbye message. + if let Some(reason) = goodbye_reason { + self.dial_queue + .push((RequestId::Router, OutboundRequest::Goodbye(reason))); } self.state = HandlerState::ShuttingDown(Box::new(sleep_until( @@ -352,6 +356,11 @@ where ); } + // If we received a goodbye, shutdown the connection. + if let InboundRequest::Goodbye(_) = req { + self.shutdown(None); + } + self.events_out.push(Ok(RPCReceived::Request( self.current_inbound_substream_id, req, @@ -419,6 +428,7 @@ where match rpc_event { RPCSend::Request(id, req) => self.send_request(id, req), RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response), + RPCSend::Shutdown(reason) => self.shutdown(Some(reason)), } } @@ -519,6 +529,9 @@ where if delay.is_elapsed() { self.state = HandlerState::Deactivated; debug!(self.log, "Handler deactivated"); + return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError( + "Shutdown timeout", + ))); } } @@ -878,6 +891,19 @@ where .map_info(|()| (id, req)), }); } + + // Check if we have completed sending a goodbye, disconnect. + if let HandlerState::ShuttingDown(_) = self.state { + if self.dial_queue.is_empty() + && self.outbound_substreams.is_empty() + && self.inbound_substreams.is_empty() + && self.events_out.is_empty() + && self.dial_negotiated == 0 + { + return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::Disconnected)); + } + } + Poll::Pending } } diff --git a/beacon_node/eth2_libp2p/src/rpc/methods.rs b/beacon_node/eth2_libp2p/src/rpc/methods.rs index 9c3bd809459..b2be196474d 100644 --- a/beacon_node/eth2_libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2_libp2p/src/rpc/methods.rs @@ -160,9 +160,9 @@ impl From for GoodbyeReason { } } -impl Into for GoodbyeReason { - fn into(self) -> u64 { - self as u64 +impl From for u64 { + fn from(reason: GoodbyeReason) -> u64 { + reason as u64 } } diff --git a/beacon_node/eth2_libp2p/src/rpc/mod.rs b/beacon_node/eth2_libp2p/src/rpc/mod.rs index 8f79e4fa167..96fa23506cd 100644 --- a/beacon_node/eth2_libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2_libp2p/src/rpc/mod.rs @@ -53,6 +53,8 @@ pub enum RPCSend { /// peer. The second parameter is a single chunk of a response. These go over *inbound* /// connections. Response(SubstreamId, RPCCodedResponse), + /// Lighthouse has requested to terminate the connection with a goodbye message. + Shutdown(GoodbyeReason), } /// RPC events received from outside Lighthouse. 
@@ -78,6 +80,7 @@ impl std::fmt::Display for RPCSend { match self { RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req), RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res), + RPCSend::Shutdown(reason) => write!(f, "Sending Goodbye: {}", reason), } } } @@ -117,11 +120,7 @@ impl RPC { methods::MAX_REQUEST_BLOCKS, Duration::from_secs(10), ) - .n_every( - Protocol::BlocksByRoot, - methods::MAX_REQUEST_BLOCKS, - Duration::from_secs(10), - ) + .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10)) .build() .expect("Configuration parameters are valid"); RPC { @@ -163,6 +162,16 @@ impl RPC { event: RPCSend::Request(request_id, event), }); } + + /// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This + /// gracefully terminates the RPC behaviour with a goodbye message. + pub fn shutdown(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: RPCSend::Shutdown(reason), + }); + } } impl NetworkBehaviour for RPC diff --git a/beacon_node/eth2_libp2p/src/rpc/protocol.rs b/beacon_node/eth2_libp2p/src/rpc/protocol.rs index 6aa804c493b..b85e48d3134 100644 --- a/beacon_node/eth2_libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2_libp2p/src/rpc/protocol.rs @@ -504,6 +504,8 @@ pub enum RPCError { NegotiationTimeout, /// Handler rejected this request. HandlerRejected, + /// We have intentionally disconnected. + Disconnected, } impl From for RPCError { @@ -542,6 +544,7 @@ impl std::fmt::Display for RPCError { RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err), RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"), RPCError::HandlerRejected => write!(f, "Handler rejected the request"), + RPCError::Disconnected => write!(f, "Gracefully Disconnected"), } } } @@ -560,6 +563,7 @@ impl std::error::Error for RPCError { RPCError::ErrorResponse(_, _) => None, RPCError::NegotiationTimeout => None, RPCError::HandlerRejected => None, + RPCError::Disconnected => None, } } } diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index a35ec8f5960..a329848d8f4 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -29,6 +29,8 @@ use std::sync::Arc; use std::time::Duration; use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; +use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR}; + pub const NETWORK_KEY_FILENAME: &str = "key"; /// The maximum simultaneous libp2p connections per peer. 
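A hypothetical call site for the new graceful-disconnect path: the behaviour asks the RPC sub-behaviour to send a Goodbye instead of dropping the connection, the handler queues `RPCSend::Shutdown`, flushes outstanding streams, and then closes with `RPCError::Disconnected`, which carries no score penalty. Imports are elided and the `TSpec: EthSpec` bound is assumed from the surrounding code.

```rust
// Illustrative helper, not part of this diff.
fn goodbye_and_disconnect<TSpec: EthSpec>(rpc: &mut RPC<TSpec>, peer_id: PeerId) {
    // Queues RPCSend::Shutdown(reason); the handler sends the Goodbye and then disconnects.
    rpc.shutdown(peer_id, GoodbyeReason::TooManyPeers);
}
```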
const MAX_CONNECTIONS_PER_PEER: u32 = 1; @@ -133,8 +135,17 @@ impl Service { let limits = ConnectionLimits::default() .with_max_pending_incoming(Some(5)) .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some((config.target_peers as f64 * 1.2) as u32)) - .with_max_established_outgoing(Some((config.target_peers as f64 * 1.2) as u32)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32, + )) + .with_max_established_total(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32, + )) .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); ( @@ -225,7 +236,7 @@ impl Service { let mut subscribed_topics: Vec = vec![]; for topic_kind in &config.topics { - if swarm.subscribe_kind(topic_kind.clone()) { + if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { subscribed_topics.push(topic_kind.clone()); } else { warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); @@ -248,7 +259,9 @@ impl Service { /// Sends a request to a peer, with a given Id. pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) { - self.swarm.send_request(peer_id, request_id, request); + self.swarm + .behaviour_mut() + .send_request(peer_id, request_id, request); } /// Informs the peer that their request failed. @@ -259,42 +272,80 @@ impl Service { error: RPCResponseErrorCode, reason: String, ) { - self.swarm._send_error_reponse(peer_id, id, error, reason); + self.swarm + .behaviour_mut() + ._send_error_reponse(peer_id, id, error, reason); } /// Report a peer's action. pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { - self.swarm.report_peer(peer_id, action, source); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .report_peer(peer_id, action, source); } /// Disconnect and ban a peer, providing a reason. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.swarm.goodbye_peer(peer_id, reason, source); + self.swarm + .behaviour_mut() + .goodbye_peer(peer_id, reason, source); } /// Sends a response to a peer's request. pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - self.swarm.send_successful_response(peer_id, id, response); + self.swarm + .behaviour_mut() + .send_successful_response(peer_id, id, response); } pub async fn next_event(&mut self) -> Libp2pEvent { loop { - match self.swarm.next_event().await { - SwarmEvent::Behaviour(behaviour) => return Libp2pEvent::Behaviour(behaviour), - SwarmEvent::ConnectionEstablished { .. } => { - // A connection could be established with a banned peer. This is - // handled inside the behaviour. + match self.swarm.select_next_some().await { + SwarmEvent::Behaviour(behaviour) => { + // Handle banning here + match &behaviour { + BehaviourEvent::PeerBanned(peer_id) => { + self.swarm.ban_peer_id(*peer_id); + } + BehaviourEvent::PeerUnbanned(peer_id) => { + self.swarm.unban_peer_id(*peer_id); + } + _ => {} + } + return Libp2pEvent::Behaviour(behaviour); + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + } => { + // Inform the peer manager. + // We require the ENR to inject into the peer db, if it exists. 
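The connection limits are now derived from the peer manager's factors rather than a flat 1.2 multiplier, and a total cap is added. As a worked example, assuming (not confirmed by this hunk) `PEER_EXCESS_FACTOR = 0.1`, `MIN_OUTBOUND_ONLY_FACTOR = 0.2` and a target of 50 peers:

```rust
// Assumed values; the real constants live in the peer_manager module.
const PEER_EXCESS_FACTOR: f32 = 0.1;
const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.2;

fn main() {
    let target_peers: u32 = 50;

    // Incoming connections leave headroom reserved for outbound-only peers.
    let max_incoming =
        (target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) as u32;
    // Outgoing and total caps both allow the full excess.
    let max_outgoing = (target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32;
    let max_total = (target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32;

    // With the assumed constants this gives 45 / 55 / 55.
    println!("incoming={} outgoing={} total={}", max_incoming, max_outgoing, max_total);
}
```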
+ let enr = self + .swarm + .behaviour_mut() + .discovery_mut() + .enr_of_peer(&peer_id); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_connection_established(peer_id, endpoint, num_established, enr); } SwarmEvent::ConnectionClosed { peer_id, - cause, - endpoint: _, + cause: _, + endpoint, num_established, } => { - trace!(self.log, "Connection closed"; "peer_id" => %peer_id, "cause" => ?cause, "connections" => num_established); + // Inform the peer manager. + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_connection_closed(peer_id, endpoint, num_established); } - SwarmEvent::NewListenAddr(multiaddr) => { - return Libp2pEvent::NewListenAddr(multiaddr) + SwarmEvent::NewListenAddr { address, .. } => { + return Libp2pEvent::NewListenAddr(address) } SwarmEvent::IncomingConnection { local_addr, @@ -307,10 +358,10 @@ impl Service { send_back_addr, error, } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error) + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); } - SwarmEvent::BannedPeer { .. } => { - // We do not ban peers at the swarm layer, so this should never occur. + SwarmEvent::BannedPeer { peer_id, .. } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); } SwarmEvent::UnreachableAddr { peer_id, @@ -319,20 +370,26 @@ impl Service { attempts_remaining, } => { debug!(self.log, "Failed to dial address"; "peer_id" => %peer_id, "address" => %address, "error" => %error, "attempts_remaining" => attempts_remaining); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_dial_failure(&peer_id); } SwarmEvent::UnknownPeerUnreachableAddr { address, error } => { debug!(self.log, "Peer not known at dialed address"; "address" => %address, "error" => %error); } - SwarmEvent::ExpiredListenAddr(multiaddr) => { - debug!(self.log, "Listen address expired"; "multiaddr" => %multiaddr) + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address) } - SwarmEvent::ListenerClosed { addresses, reason } => { + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); if Swarm::listeners(&self.swarm).count() == 0 { return Libp2pEvent::ZeroListeners; } } - SwarmEvent::ListenerError { error } => { + SwarmEvent::ListenerError { error, .. } => { // this is non fatal, but we still check warn!(self.log, "Listener error"; "error" => ?error); if Swarm::listeners(&self.swarm).count() == 0 { @@ -340,7 +397,16 @@ impl Service { } } SwarmEvent::Dialing(peer_id) => { - debug!(self.log, "Dialing peer"; "peer_id" => %peer_id); + // We require the ENR to inject into the peer db, if it exists. 
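Several of the pattern changes above follow from `SwarmEvent` variants gaining fields in the newer libp2p (`NewListenAddr`, `ExpiredListenAddr`, `ListenerClosed`, `ListenerError` become struct variants), so tuple patterns turn into struct patterns that ignore the extra fields. A toy illustration, not the real libp2p types:

```rust
// Illustrative only: a tuple variant that became a struct variant is matched
// with `{ field, .. }` so unused fields can be ignored.
enum SwarmEvent {
    NewListenAddr { listener_id: u64, address: String },
    ExpiredListenAddr { listener_id: u64, address: String },
}

fn log_event(ev: SwarmEvent) -> String {
    match ev {
        SwarmEvent::NewListenAddr { address, .. } => format!("listening on {}", address),
        SwarmEvent::ExpiredListenAddr { address, .. } => format!("expired {}", address),
    }
}

fn main() {
    let ev = SwarmEvent::NewListenAddr {
        listener_id: 1,
        address: "/ip4/0.0.0.0/tcp/9000".into(),
    };
    assert_eq!(log_event(ev), "listening on /ip4/0.0.0.0/tcp/9000");
}
```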
+ let enr = self + .swarm + .behaviour_mut() + .discovery_mut() + .enr_of_peer(&peer_id); + self.swarm + .behaviour_mut() + .peer_manager_mut() + .inject_dialing(&peer_id, enr); } } } @@ -354,8 +420,8 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; fn build_transport( local_private_key: Keypair, ) -> std::io::Result<(BoxedTransport, Arc)> { - let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true); - let transport = libp2p::dns::DnsConfig::new(transport)?; + let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true); + let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; #[cfg(feature = "libp2p-websocket")] let transport = { let trans_clone = transport.clone(); @@ -369,13 +435,17 @@ fn build_transport( mplex_config.set_max_buffer_size(256); mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); + // yamux config + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + // Authentication Ok(( transport .upgrade(core::upgrade::Version::V1) .authenticate(generate_noise_config(&local_private_key)) .multiplex(core::upgrade::SelectUpgrade::new( - libp2p::yamux::YamuxConfig::default(), + yamux_config, mplex_config, )) .timeout(Duration::from_secs(10)) diff --git a/beacon_node/eth2_libp2p/src/types/topics.rs b/beacon_node/eth2_libp2p/src/types/topics.rs index fa67dc20bb3..bcd4a9c4911 100644 --- a/beacon_node/eth2_libp2p/src/types/topics.rs +++ b/beacon_node/eth2_libp2p/src/types/topics.rs @@ -161,19 +161,19 @@ impl GossipTopic { } } -impl Into for GossipTopic { - fn into(self) -> Topic { - Topic::new(self) +impl From for Topic { + fn from(topic: GossipTopic) -> Topic { + Topic::new(topic) } } -impl Into for GossipTopic { - fn into(self) -> String { - let encoding = match self.encoding { +impl From for String { + fn from(topic: GossipTopic) -> String { + let encoding = match topic.encoding { GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, }; - let kind = match self.kind { + let kind = match topic.kind { GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), @@ -188,7 +188,7 @@ impl Into for GossipTopic { format!( "/{}/{}/{}/{}", TOPIC_PREFIX, - hex::encode(self.fork_digest), + hex::encode(topic.fork_digest), kind, encoding ) diff --git a/beacon_node/eth2_libp2p/tests/common/mod.rs b/beacon_node/eth2_libp2p/tests/common/mod.rs index ad1dba161aa..cd8394fe811 100644 --- a/beacon_node/eth2_libp2p/tests/common/mod.rs +++ b/beacon_node/eth2_libp2p/tests/common/mod.rs @@ -134,7 +134,7 @@ pub async fn build_libp2p_instance( #[allow(dead_code)] pub fn get_enr(node: &LibP2PService) -> Enr { - node.swarm.local_enr() + node.swarm.behaviour().local_enr() } // Returns `n` libp2p peers in fully connected topology. 
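The `GossipTopic` conversions above keep producing the same wire format, `/{prefix}/{fork_digest}/{kind}/{encoding}`, only exposed through `From` now. A minimal sketch of the string construction, using a hand-rolled hex encoder instead of the `hex` crate so it stands alone; the `eth2` prefix is assumed, not read from this hunk:

```rust
struct GossipTopic {
    fork_digest: [u8; 4],
    kind: &'static str,     // e.g. "beacon_block"
    encoding: &'static str, // e.g. "ssz_snappy"
}

fn hex_encode(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

impl From<GossipTopic> for String {
    fn from(topic: GossipTopic) -> String {
        // Mirrors the "/{prefix}/{fork_digest}/{kind}/{encoding}" layout in the diff.
        format!(
            "/eth2/{}/{}/{}",
            hex_encode(&topic.fork_digest),
            topic.kind,
            topic.encoding
        )
    }
}

fn main() {
    let topic = GossipTopic {
        fork_digest: [0xb5, 0x30, 0x3f, 0x2a],
        kind: "beacon_block",
        encoding: "ssz_snappy",
    };
    let s: String = topic.into();
    assert_eq!(s, "/eth2/b5303f2a/beacon_block/ssz_snappy");
}
```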
@@ -179,7 +179,7 @@ pub async fn build_node_pair( let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await; - let receiver_multiaddr = receiver.swarm.local_enr().multiaddr()[1].clone(); + let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone(); // let the two nodes set up listeners let sender_fut = async { diff --git a/beacon_node/eth2_libp2p/tests/rpc_tests.rs b/beacon_node/eth2_libp2p/tests/rpc_tests.rs index 0a1f7aed04f..9d1faf748cf 100644 --- a/beacon_node/eth2_libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2_libp2p/tests/rpc_tests.rs @@ -54,10 +54,10 @@ fn test_status_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -91,7 +91,7 @@ fn test_status_rpc() { if request == rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), @@ -150,10 +150,10 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -198,14 +198,14 @@ fn test_blocks_by_range_chunked_rpc() { // send the response warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), ); } // send the stream termination - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, Response::BlocksByRange(None), @@ -264,10 +264,10 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -336,7 +336,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( *peer_id, *stream_id, rpc_response.clone(), @@ -396,10 +396,10 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), 
rpc_request.clone(), @@ -442,14 +442,14 @@ fn test_blocks_by_range_single_empty_rpc() { warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, rpc_response.clone(), ); } // send the stream termination - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, Response::BlocksByRange(None), @@ -515,10 +515,10 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -572,13 +572,15 @@ fn test_blocks_by_root_chunked_rpc() { } else { rpc_response_altair.clone() }; - receiver - .swarm - .send_successful_response(peer_id, id, rpc_response); + receiver.swarm.behaviour_mut().send_successful_response( + peer_id, + id, + rpc_response, + ); debug!(log, "Sending message"); } // send the stream termination - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( peer_id, id, Response::BlocksByRange(None), @@ -645,10 +647,10 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.send_request( + sender.swarm.behaviour_mut().send_request( peer_id, RequestId::Sync(10), rpc_request.clone(), @@ -717,7 +719,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.send_successful_response( + receiver.swarm.behaviour_mut().send_successful_response( *peer_id, *stream_id, rpc_response.clone(), @@ -760,10 +762,10 @@ fn test_goodbye_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => { + Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a goodbye and disconnect debug!(log, "Sending RPC"); - sender.swarm.goodbye_peer( + sender.swarm.behaviour_mut().goodbye_peer( &peer_id, GoodbyeReason::IrrelevantNetwork, ReportSource::SyncService, diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 04706f72d97..bf19189f97d 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -20,7 +20,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" } eth2_ssz = "0.1.2" eth2_hashing = "0.1.0" tree_hash = "0.1.1" -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } parking_lot = "0.11.0" slog = "2.5.2" exit-future = "0.2.0" diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index ddea773f7ac..0f288cfea0c 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] warp = { git = "https://github.com/paulhauner/warp ", branch = "cors-wildcard" } serde = { version = "1.0.116", features = ["derive"] } 
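Most of the test churn above is mechanical: in the newer libp2p the swarm no longer dereferences to its behaviour, so every call is routed through `behaviour()` / `behaviour_mut()`. A toy model of the pattern (not the libp2p API):

```rust
// Toy stand-ins showing why call sites change shape: the wrapper no longer
// derefs to the inner behaviour, so explicit accessors are required.
struct Behaviour {
    subscriptions: Vec<String>,
}

impl Behaviour {
    fn subscribe(&mut self, topic: &str) -> bool {
        self.subscriptions.push(topic.to_string());
        true
    }
}

struct Swarm {
    behaviour: Behaviour,
}

impl Swarm {
    fn behaviour(&self) -> &Behaviour {
        &self.behaviour
    }
    fn behaviour_mut(&mut self) -> &mut Behaviour {
        &mut self.behaviour
    }
}

fn main() {
    let mut swarm = Swarm { behaviour: Behaviour { subscriptions: vec![] } };
    // The old `swarm.subscribe(..)` style (via Deref) no longer exists;
    // the explicit accessor is used instead.
    assert!(swarm.behaviour_mut().subscribe("beacon_block"));
    assert_eq!(swarm.behaviour().subscriptions.len(), 1);
}
```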
-tokio = { version = "1.1.0", features = ["macros","sync"] } +tokio = { version = "1.7.1", features = ["macros","sync"] } tokio-stream = { version = "0.1.3", features = ["sync"] } tokio-util = "0.6.3" parking_lot = "0.11.0" @@ -34,5 +34,4 @@ futures = "0.3.8" store = { path = "../store" } environment = { path = "../../lighthouse/environment" } tree_hash = "0.1.1" -discv5 = { version = "0.1.0-beta.5", features = ["libp2p"] } sensitive_url = { path = "../../common/sensitive_url" } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 0b97c371925..80f325cddba 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -5,11 +5,11 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; -use discv5::enr::{CombinedKey, EnrBuilder}; use environment::null_logger; use eth2::Error; use eth2::StatusCode; use eth2::{types::*, BeaconNodeHttpClient, Timeouts}; +use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder}; use eth2_libp2p::{ rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index e1746781bf1..aabf96b8273 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -23,7 +23,7 @@ warp_utils = { path = "../../common/warp_utils" } malloc_utils = { path = "../../common/malloc_utils" } [dev-dependencies] -tokio = { version = "1.1.0", features = ["sync"] } +tokio = { version = "1.7.1", features = ["sync"] } reqwest = { version = "0.11.0", features = ["json"] } environment = { path = "../../lighthouse/environment" } types = { path = "../../consensus/types" } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 10808c58eb3..63990a54c88 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,7 +15,6 @@ slog-term = "2.6.0" slog-async = "2.5.0" logging = { path = "../../common/logging" } environment = { path = "../../lighthouse/environment" } -discv5 = { version = "0.1.0-beta.3" } [dependencies] beacon_chain = { path = "../beacon_chain" } @@ -32,7 +31,7 @@ eth2_ssz_types = { path = "../../consensus/ssz_types" } tree_hash = "0.1.1" futures = "0.3.7" error-chain = "0.12.4" -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } tokio-stream = "0.1.3" parking_lot = "0.11.0" smallvec = "1.6.1" diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 8bb758fb0de..30cc1724276 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -8,9 +8,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use discv5::enr::{CombinedKey, EnrBuilder}; use environment::{null_logger, Environment, EnvironmentBuilder}; use eth2_libp2p::{ + discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}, MessageId, NetworkGlobals, PeerId, diff --git a/beacon_node/network/src/beacon_processor/worker/mod.rs b/beacon_node/network/src/beacon_processor/worker/mod.rs index a0c6d200bc7..adc1ad66903 100644 --- 
a/beacon_node/network/src/beacon_processor/worker/mod.rs +++ b/beacon_node/network/src/beacon_processor/worker/mod.rs @@ -1,7 +1,7 @@ use super::work_reprocessing_queue::ReprocessQueueMessage; use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{error, Logger}; +use slog::{debug, Logger}; use std::sync::Arc; use tokio::sync::mpsc; @@ -27,7 +27,7 @@ impl Worker { /// Creates a log if there is an internal error. fn send_sync_committee_message(&self, message: SyncMessage) { self.sync_tx.send(message).unwrap_or_else(|e| { - error!(self.log, "Could not send message to the sync service"; + debug!(self.log, "Could not send message to the sync service, likely shutdown"; "error" => %e) }); } @@ -37,7 +37,7 @@ impl Worker { /// Creates a log if there is an internal error. fn send_network_message(&self, message: NetworkMessage) { self.network_tx.send(message).unwrap_or_else(|e| { - error!(self.log, "Could not send message to the network service"; + debug!(self.log, "Could not send message to the network service, likely shutdown"; "error" => %e) }); } diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index 59b0bd9ab24..881be15a7f8 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -27,6 +27,13 @@ pub fn persist_dht, Cold: ItemStore>( store.put_item(&DHT_DB_KEY, &PersistedDht { enrs }) } +/// Attempts to clear any DHT entries. +pub fn clear_dht, Cold: ItemStore>( + store: Arc>, +) -> Result<(), store::Error> { + store.hot_db.delete::(&DHT_DB_KEY) +} + /// Wrapper around DHT for persistence to disk. pub struct PersistedDht { pub enrs: Vec, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4d35a35d2cc..17d985c4442 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,4 @@ -use crate::persisted_dht::{load_dht, persist_dht}; +use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::SyncCommitteeService; use crate::{error, metrics}; @@ -209,7 +209,7 @@ impl NetworkService { "Loading peers into the routing table"; "peers" => enrs_to_load.len() ); for enr in enrs_to_load { - libp2p.swarm.add_enr(enr.clone()); + libp2p.swarm.behaviour_mut().add_enr(enr.clone()); } } @@ -309,7 +309,7 @@ fn spawn_service( .map(|gauge| gauge.reset()); } metrics::update_gossip_metrics::( - &service.libp2p.swarm.gs(), + &service.libp2p.swarm.behaviour_mut().gs(), &service.network_globals, ); // update sync metrics @@ -345,8 +345,7 @@ fn spawn_service( }) ) }).unwrap_or(None) { - if (*service.libp2p.swarm) - .update_gossipsub_parameters(active_validators, slot).is_err() { + if service.libp2p.swarm.behaviour_mut().update_gossipsub_parameters(active_validators, slot).is_err() { error!( service.log, "Failed to update gossipsub parameters"; @@ -372,7 +371,7 @@ fn spawn_service( service.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); // If there is an external TCP port update, modify our local ENR. 
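The new `clear_dht` helper above is used later in this diff by the network service's `Drop` impl: previously persisted entries are wiped before the current ENR set is written, so stale peers do not accumulate across restarts. A hedged sketch of that clear-then-persist shutdown step, with a toy in-memory store standing in for the real database:

```rust
use std::collections::HashMap;

// Toy stand-in for the on-disk store keyed by a fixed DHT key.
struct Store {
    items: HashMap<&'static str, Vec<String>>,
}

const DHT_DB_KEY: &str = "dht";

impl Store {
    fn clear_dht(&mut self) {
        self.items.remove(DHT_DB_KEY);
    }
    fn persist_dht(&mut self, enrs: Vec<String>) {
        self.items.insert(DHT_DB_KEY, enrs);
    }
}

/// Sketch of what the Drop impl now does on network-service shutdown.
fn on_shutdown(store: &mut Store, current_enrs: Vec<String>) {
    store.clear_dht(); // drop whatever was persisted last run
    store.persist_dht(current_enrs); // then write only the live entries
}

fn main() {
    let mut store = Store { items: HashMap::new() };
    store.persist_dht(vec!["enr:-old".into()]);
    on_shutdown(&mut store, vec!["enr:-fresh".into()]);
    assert_eq!(store.items[DHT_DB_KEY], vec!["enr:-fresh".to_string()]);
}
```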
if let Some(tcp_socket) = tcp_socket { - if let Err(e) = service.libp2p.swarm.peer_manager().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { + if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { warn!(service.log, "Failed to update ENR"; "error" => e); } } @@ -380,7 +379,7 @@ fn spawn_service( // UPnP mappings if !service.discovery_auto_update { if let Some(udp_socket) = udp_socket { - if let Err(e) = service.libp2p.swarm.peer_manager().discovery_mut().update_enr_udp_socket(udp_socket) { + if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_udp_socket(udp_socket) { warn!(service.log, "Failed to update ENR"; "error" => e); } } @@ -399,6 +398,7 @@ fn spawn_service( service .libp2p .swarm + .behaviour_mut() .report_message_validation_result( &propagation_source, message_id, validation_result ); @@ -417,7 +417,7 @@ fn spawn_service( "topics" => ?topic_kinds ); metrics::expose_publish_metrics(&messages); - service.libp2p.swarm.publish(messages); + service.libp2p.swarm.behaviour_mut().publish(messages); } NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source), NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), @@ -440,7 +440,7 @@ fn spawn_service( for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter() { for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(topic_kind.clone(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.subscribe(topic.clone()) { + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); @@ -453,10 +453,10 @@ fn spawn_service( for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); // Update the ENR bitfield - service.libp2p.swarm.update_enr_subnet(subnet, true); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.subscribe(topic.clone()) { + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); @@ -466,10 +466,10 @@ fn spawn_service( for subnet_id in 0..<::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64() { let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); // Update the ENR bitfield - service.libp2p.swarm.update_enr_subnet(subnet, true); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.subscribe(topic.clone()) { + if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); @@ -494,23 +494,23 @@ fn spawn_service( SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.subscribe(topic); + 
service.libp2p.swarm.behaviour_mut().subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.unsubscribe(topic); + service.libp2p.swarm.behaviour_mut().unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.update_enr_subnet(subnet, true); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.update_enr_subnet(subnet, false); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.discover_subnet_peers(subnets_to_discover); + service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); } } } @@ -520,23 +520,23 @@ fn spawn_service( SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.subscribe(topic); + service.libp2p.swarm.behaviour_mut().subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in service.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.unsubscribe(topic); + service.libp2p.swarm.behaviour_mut().unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.update_enr_subnet(subnet, true); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.update_enr_subnet(subnet, false); + service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.discover_subnet_peers(subnets_to_discover); + service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); } } } @@ -544,17 +544,15 @@ fn spawn_service( // poll the swarm match libp2p_event { Libp2pEvent::Behaviour(event) => match event { - - BehaviourEvent::PeerDialed(peer_id) => { + BehaviourEvent::PeerConnectedOutgoing(peer_id) => { let _ = service .router_send .send(RouterMessage::PeerDialed(peer_id)) .map_err(|_| { debug!(service.log, "Failed to send peer dialed to router"); }); }, - BehaviourEvent::PeerConnected(_peer_id) => { - // A peer has connected to us - // We currently do not perform any action here. + BehaviourEvent::PeerConnectedIncoming(_) | BehaviourEvent::PeerBanned(_) | BehaviourEvent::PeerUnbanned(_) => { + // No action required for these events. 
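The repeated subscribe loops above all follow the same shape: every topic kind is subscribed once per required fork digest (two digests around a fork transition), now via `behaviour_mut()`. A simplified sketch of that loop with stand-in types rather than Lighthouse's real `GossipTopic`:

```rust
struct GossipTopic {
    kind: String,
    fork_digest: [u8; 4],
}

// Build one topic per (kind, fork digest) pair, as the service does when it
// (re)subscribes to core and subnet topics.
fn subscribe_all(core_kinds: &[&str], fork_digests: &[[u8; 4]]) -> Vec<GossipTopic> {
    let mut subscribed = Vec::new();
    for kind in core_kinds {
        for digest in fork_digests {
            subscribed.push(GossipTopic {
                kind: kind.to_string(),
                fork_digest: *digest,
            });
        }
    }
    subscribed
}

fn main() {
    // Around a fork transition two digests are "required", so each topic kind
    // is subscribed twice (once per digest).
    let topics = subscribe_all(&["beacon_block", "voluntary_exit"], &[[0; 4], [1, 0, 0, 0]]);
    assert_eq!(topics.len(), 4);
    assert_eq!(topics[0].kind, "beacon_block");
    assert_eq!(topics[0].fork_digest, [0; 4]);
}
```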
}, BehaviourEvent::PeerDisconnected(peer_id) => { let _ = service @@ -667,6 +665,7 @@ fn spawn_service( service .libp2p .swarm + .behaviour_mut() .update_fork_version(new_enr_fork_id.clone()); // Reinitialize the next_fork_update service.next_fork_update = Box::pin(next_fork_delay(&service.beacon_chain).into()); @@ -683,7 +682,7 @@ fn spawn_service( } Some(_) = &mut service.next_unsubscribe => { let new_enr_fork_id = service.beacon_chain.enr_fork_id(); - service.libp2p.swarm.unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + service.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); info!(service.log, "Unsubscribed from old fork topics"); service.next_unsubscribe = Box::pin(None.into()); } @@ -706,12 +705,16 @@ fn next_fork_delay( impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating - let enrs = self.libp2p.swarm.enr_entries(); + let enrs = self.libp2p.swarm.behaviour_mut().enr_entries(); debug!( self.log, "Persisting DHT to store"; "Number of peers" => enrs.len(), ); + if let Err(e) = clear_dht::(self.store.clone()) { + error!(self.log, "Failed to clear old DHT entries"; "error" => ?e); + } + // Still try to update new entries match persist_dht::(self.store.clone(), enrs) { Err(e) => error!( self.log, diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index d45a214b388..9da65f9dbfc 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" beacon_chain = { path = "../beacon_chain" } types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } slog = "2.5.2" parking_lot = "0.11.0" futures = "0.3.7" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 897fa2f3bc0..bb4dff5f8c6 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -13,7 +13,7 @@ eth2_network_config = { path = "../common/eth2_network_config" } eth2_ssz = "0.1.2" slog = "2.5.2" sloggers = "1.0.1" -tokio = "1.1.0" +tokio = "1.7.1" log = "0.4.11" slog-term = "2.6.0" logging = { path = "../common/logging" } diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index b1efd805ce7..ed563504b0b 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -89,6 +89,7 @@ pub async fn run(config: BootNodeConfig, log: slog::Logger) { // Ignore these events here } Discv5Event::EnrAdded { .. } => {} // Ignore + Discv5Event::TalkRequest(_) => {} // Ignore Discv5Event::NodeInserted { .. 
} => {} // Ignore Discv5Event::SocketUpdated(socket_addr) => { info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr)); diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index b11f93672ef..bab813d19e9 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -19,4 +19,4 @@ serde_yaml = "0.8.13" types = { path = "../../consensus/types"} eth2_ssz = "0.1.2" eth2_config = { path = "../eth2_config"} -enr = { version = "0.5.0", features = ["ed25519", "k256"] } +enr = { version = "0.5.1", features = ["ed25519", "k256"] } diff --git a/common/hashset_delay/Cargo.toml b/common/hashset_delay/Cargo.toml index 80e5e9e2b6e..d07023ee15a 100644 --- a/common/hashset_delay/Cargo.toml +++ b/common/hashset_delay/Cargo.toml @@ -9,4 +9,4 @@ futures = "0.3.7" tokio-util = { version = "0.6.2", features = ["time"] } [dev-dependencies] -tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] } +tokio = { version = "1.7.1", features = ["time", "rt-multi-thread", "macros"] } diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 7ecaec20111..b95f33e8f50 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" reqwest = { version = "0.11.0", features = ["json","stream"] } futures = "0.3.7" task_executor = { path = "../task_executor" } -tokio = "1.1.0" +tokio = "1.7.1" eth2 = {path = "../eth2"} serde_json = "1.0.58" serde = "1.0.116" diff --git a/common/remote_signer_consumer/Cargo.toml b/common/remote_signer_consumer/Cargo.toml index bda6264c621..9a3c443d108 100644 --- a/common/remote_signer_consumer/Cargo.toml +++ b/common/remote_signer_consumer/Cargo.toml @@ -11,6 +11,6 @@ remote_signer_test = { path = "../../testing/remote_signer_test" } [dependencies] reqwest = { version = "0.11.0", features = ["json"] } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } types = { path = "../../consensus/types" } sensitive_url = { path = "../sensitive_url" } diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 5f142522c52..d25438f4909 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -tokio = { version = "1.1.0", features = ["rt"] } +tokio = { version = "1.7.1", features = ["rt"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 7b2ab637398..faf27906c6a 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -14,7 +14,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } state_processing = { path = "../../consensus/state_processing" } safe_arith = { path = "../../consensus/safe_arith" } serde = { version = "1.0.116", features = ["derive"] } -tokio = { version = "1.1.0", features = ["sync"] } +tokio = { version = "1.7.1", features = ["sync"] } headers = "0.3.2" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index c12deda9722..1d311ec8cd9 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -27,7 +27,7 @@ dirs = "3.0.1" genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } tree_hash = "0.1.1" -tokio = { version = 
"1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } clap_utils = { path = "../common/clap_utils" } eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 925ce855c44..0598998a470 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -19,7 +19,7 @@ spec-minimal = [] [dependencies] beacon_node = { "path" = "../beacon_node" } -tokio = "1.1.0" +tokio = "1.7.1" slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = "1.0.1" types = { "path" = "../consensus/types" } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 6e0556a6d2d..31e153055a8 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -tokio = { version = "1.1.0", features = ["macros", "rt", "rt-multi-thread" ] } +tokio = { version = "1.7.1", features = ["macros", "rt", "rt-multi-thread" ] } slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = "1.0.1" types = { "path" = "../../consensus/types" } diff --git a/slasher/service/Cargo.toml b/slasher/service/Cargo.toml index 0c43ed11371..23a85e7ba80 100644 --- a/slasher/service/Cargo.toml +++ b/slasher/service/Cargo.toml @@ -14,6 +14,6 @@ slog = "2.5.2" slot_clock = { path = "../../common/slot_clock" } state_processing = { path = "../../consensus/state_processing" } task_executor = { path = "../../common/task_executor" } -tokio = { version = "1.1.0", features = ["full"] } +tokio = { version = "1.7.1", features = ["full"] } tokio-stream = "0.1.3" types = { path = "../../consensus/types" } diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 73f96bff561..12286735c58 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } tokio-compat-02 = "0.2.0" web3 = { version = "0.16.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } futures = "0.3.7" diff --git a/testing/remote_signer_test/Cargo.toml b/testing/remote_signer_test/Cargo.toml index 0fbb4b106ec..43cd2e13c1b 100644 --- a/testing/remote_signer_test/Cargo.toml +++ b/testing/remote_signer_test/Cargo.toml @@ -15,7 +15,7 @@ reqwest = { version = "0.11.0", features = ["blocking", "json"] } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" tempfile = "3.1.0" -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } types = { path = "../../consensus/types" } sensitive_url = { path = "../../common/sensitive_url" } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 6733ee6327c..038f8f72b6f 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -13,7 +13,7 @@ types = { path = "../../consensus/types" } validator_client = { path = "../../validator_client" } parking_lot = "0.11.0" futures = "0.3.7" -tokio = "1.1.0" +tokio = "1.7.1" eth1_test_rig = { path = "../eth1_test_rig" } env_logger = "0.8.2" clap = "2.33.3" diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 38fa8e87811..792255cf07f 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -9,7 +9,7 @@ 
name = "validator_client" path = "src/lib.rs" [dev-dependencies] -tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] } +tokio = { version = "1.7.1", features = ["time", "rt-multi-thread", "macros"] } deposit_contract = { path = "../common/deposit_contract" } [dependencies] @@ -30,7 +30,7 @@ serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog-async = "2.5.0" slog-term = "2.6.0" -tokio = { version = "1.1.0", features = ["time"] } +tokio = { version = "1.7.1", features = ["time"] } futures = "0.3.7" dirs = "3.0.1" directory = { path = "../common/directory" }