diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 37839791456b..a92dc0bb006c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,3 +2,5 @@ /.github/ @paritytech/ci @chevdor /scripts/ci/ @paritytech/ci @chevdor /.gitlab-ci.yml @paritytech/ci +# lingua.dic is not managed by CI team +/scripts/ci/gitlab/lingua.dic diff --git a/.github/workflows/release-50_publish-docker-release.yml b/.github/workflows/release-50_publish-docker-release.yml index bb74b32e8000..5a9d683bc3b3 100644 --- a/.github/workflows/release-50_publish-docker-release.yml +++ b/.github/workflows/release-50_publish-docker-release.yml @@ -12,7 +12,7 @@ jobs: - name: Checkout sources uses: actions/checkout@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1.7 + uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 - name: Cache Docker layers uses: actions/cache@v3 with: diff --git a/.github/workflows/release-51_publish-docker-manual.yml b/.github/workflows/release-51_publish-docker-manual.yml index 3caea3e3c3c9..e0bcf99a8d60 100644 --- a/.github/workflows/release-51_publish-docker-manual.yml +++ b/.github/workflows/release-51_publish-docker-manual.yml @@ -19,7 +19,7 @@ jobs: - name: Checkout sources uses: actions/checkout@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1.7 + uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 - name: Cache Docker layers uses: actions/cache@v3 with: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ce9be9af767a..0dd640186bb8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,7 +35,7 @@ variables: CI_IMAGE: "paritytech/ci-linux:production" DOCKER_OS: "debian:stretch" ARCH: "x86_64" - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.2.67" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.2.78" PIPELINE_SCRIPTS_TAG: "v0.4" default: diff --git a/Cargo.lock b/Cargo.lock index 3fd0ff745d58..1bd75cddc0b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -312,9 +312,9 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2f8a4a203be3325981310ab243a28e6e4ea55b6519bffce05d41ab60e09ad8" +checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723" dependencies = [ "async-std", "async-trait", @@ -435,7 +435,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "async-trait", @@ -472,7 +472,7 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -492,7 +492,7 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "beefy-primitives", 
"sp-api", @@ -502,10 +502,11 @@ dependencies = [ [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", + "serde", "sp-api", "sp-application-crypto", "sp-core", @@ -515,12 +516,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "bimap" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" - [[package]] name = "bincode" version = "1.3.3" @@ -628,7 +623,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -640,7 +635,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", "generic-array 0.14.4", ] @@ -662,12 +656,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "blocking" version = "1.1.0" @@ -922,26 +910,24 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.18" +version = "4.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" +checksum = "6bf8832993da70a4c6d13c581f4463c2bdda27b9bf1c5498dc4365543abe6d6f" dependencies = [ "atty", "bitflags", "clap_derive", "clap_lex", - "indexmap", - "lazy_static", + "once_cell", "strsim", "termcolor", - "textwrap", ] [[package]] name = "clap_derive" -version = "3.1.18" +version = "4.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" +checksum = "c42f169caba89a7d512b5418b09864543eeb4d497416c917d7137863bd2076ad" dependencies = [ "heck", "proc-macro-error", @@ -952,22 +938,13 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" dependencies = [ "os_str_bytes", ] -[[package]] -name = "cmake" -version = "0.1.48" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" -dependencies = [ - "cc", -] - [[package]] name = "coarsetime" version = "0.1.22" @@ -1302,17 +1279,6 @@ dependencies = [ "cipher", ] -[[package]] -name = "cuckoofilter" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" -dependencies = [ - "byteorder", - "fnv", - "rand 0.7.3", -] - [[package]] name = "curve25519-dalek" version = "2.1.3" @@ -1533,6 +1499,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "downcast-rs" version = "1.2.0" @@ -1656,9 +1628,9 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" +checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2", @@ -1977,6 +1949,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2001,7 +1982,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", ] @@ -2016,10 +1997,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" + [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -2042,7 +2029,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "Inflector", "array-bytes", @@ -2093,7 +2080,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2104,7 +2091,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2120,7 +2107,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ 
"frame-support", "frame-system", @@ -2149,7 +2136,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "bitflags", "frame-metadata", @@ -2181,7 +2168,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "Inflector", "cfg-expr", @@ -2195,7 +2182,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2207,7 +2194,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro2", "quote", @@ -2217,7 +2204,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-support-test-pallet", @@ -2240,7 +2227,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -2251,7 +2238,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "log", @@ -2269,7 +2256,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -2284,7 +2271,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "sp-api", @@ -2293,7 +2280,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "parity-scale-codec", @@ -2464,7 +2451,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "chrono", "frame-election-provider-support", @@ -2690,12 +2677,6 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" -[[package]] -name = "hex_fmt" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" - [[package]] name = "hmac" version = "0.8.1" @@ -2729,13 +2710,14 @@ dependencies = [ [[package]] name = "honggfuzz" -version = "0.5.54" +version = "0.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" +checksum = "848e9c511092e0daa0a35a63e8e6e475a3e8f870741448b9f6028d69b142f18e" dependencies = [ "arbitrary", "lazy_static", - "memmap", + "memmap2 0.5.0", + "rustc_version", ] [[package]] @@ -2873,9 +2855,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8f4a3c3d4c89351ca83e120c1c00b27df945d38e05695668c9d4b4f7bc52f3" +checksum = "065c008e570a43c00de6aed9714035e5ea6a498c255323db9091722af6ee67dd" dependencies = [ "async-io", "core-foundation", @@ -3409,9 +3391,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.46.1" +version = "0.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81327106887e42d004fbdab1fef93675be2e2e07c1b95fce45e2cc813485611d" +checksum = "ec878fda12ebec479186b3914ebc48ff180fa4c51847e11a1a68bf65249e02c1" dependencies = [ "bytes", "futures", @@ -3419,12 +3401,8 @@ dependencies = [ "getrandom 0.2.3", "instant", "lazy_static", - "libp2p-autonat", "libp2p-core", - "libp2p-deflate", "libp2p-dns", - "libp2p-floodsub", - "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-mdns", @@ -3432,49 +3410,24 @@ dependencies = [ "libp2p-mplex", "libp2p-noise", "libp2p-ping", - "libp2p-plaintext", - "libp2p-pnet", - "libp2p-relay", - "libp2p-rendezvous", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", "libp2p-tcp", - "libp2p-uds", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multiaddr", "parking_lot 0.12.1", "pin-project", - "rand 0.7.3", "smallvec", ] -[[package]] -name = "libp2p-autonat" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4decc51f3573653a9f4ecacb31b1b922dd20c25a6322bb15318ec04287ec46f9" -dependencies = [ - "async-trait", - "futures", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-request-response", - "libp2p-swarm", - "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.8.5", -] - [[package]] name = "libp2p-core" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fbf9b94cefab7599b2d3dff2f93bee218c6621d68590b23ede4485813cbcece6" +checksum = "799676bb0807c788065e57551c6527d461ad572162b0519d1958946ff9e0539d" dependencies = [ "asn1_der", "bs58", @@ -3485,17 +3438,15 @@ dependencies = [ "futures-timer", "instant", "lazy_static", - "libsecp256k1", "log", "multiaddr", "multihash", "multistream-select", "parking_lot 0.12.1", "pin-project", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.5", - "ring", "rw-stream-sink", "sha2 0.10.2", "smallvec", @@ -3505,22 +3456,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "libp2p-deflate" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0183dc2a3da1fbbf85e5b6cf51217f55b14f5daea0c455a9536eef646bfec71" -dependencies = [ - "flate2", - "futures", - "libp2p-core", -] - [[package]] name = "libp2p-dns" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cbf54723250fa5d521383be789bf60efdabe6bacfb443f87da261019a49b4b5" +checksum = "2322c9fb40d99101def6a01612ee30500c89abbbecb6297b3cd252903a4c1720" dependencies = [ "async-std-resolver", "futures", @@ -3531,57 +3471,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "libp2p-floodsub" -version = "0.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a4b6ffd53e355775d24b76f583fdda54b3284806f678499b57913adb94f231" -dependencies = [ - "cuckoofilter", - "fnv", - "futures", - "libp2p-core", - "libp2p-swarm", - "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.7.3", - "smallvec", -] - -[[package]] -name = "libp2p-gossipsub" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b4b888cfbeb1f5551acd3aa1366e01bf88ede26cc3c4645d0d2d004d5ca7b0" -dependencies = [ - "asynchronous-codec", - "base64", - "byteorder", - "bytes", - "fnv", - "futures", - "hex_fmt", - "instant", - "libp2p-core", - "libp2p-swarm", - "log", - "prometheus-client", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.7.3", - "regex", - "sha2 0.10.2", - "smallvec", - "unsigned-varint", - "wasm-timer", -] - [[package]] name = "libp2p-identify" -version = "0.37.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50b585518f8efd06f93ac2f976bd672e17cdac794644b3117edd078e96bda06" +checksum = "dcf9a121f699e8719bda2e6e9e9b6ddafc6cff4602471d6481c1067930ccb29b" dependencies = [ "asynchronous-codec", "futures", @@ -3589,9 +3483,9 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "lru 0.7.8", - "prost 0.10.3", - "prost-build 0.10.4", + "lru 0.8.0", + "prost", + "prost-build", "prost-codec", "smallvec", "thiserror", @@ -3600,9 +3494,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740862893bb5f06ac24acc9d49bdeadc3a5e52e51818a30a25c1f3519da2c851" +checksum = "6721c200e2021f6c3fab8b6cf0272ead8912d871610ee194ebd628cecf428f22" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -3615,9 +3509,9 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.7.3", + "prost", + "prost-build", + "rand 0.8.5", "sha2 0.10.2", "smallvec", "thiserror", @@ -3628,16 +3522,15 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"66e5e5919509603281033fd16306c61df7a4428ce274b67af5e14b07de5cdcb2" +checksum = "761704e727f7d68d58d7bc2231eafae5fc1b9814de24290f126df09d4bd37a15" dependencies = [ "async-io", "data-encoding", "dns-parser", "futures", "if-watch", - "lazy_static", "libp2p-core", "libp2p-swarm", "log", @@ -3649,25 +3542,23 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8aff4a1abef42328fbb30b17c853fff9be986dc39af17ee39f9c5f755c5e0c" +checksum = "9ee31b08e78b7b8bfd1c4204a9dd8a87b4fcdf6dafc57eb51701c1c264a81cb9" dependencies = [ "libp2p-core", - "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-ping", - "libp2p-relay", "libp2p-swarm", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fd1b20638ec209c5075dfb2e8ce6a7ea4ec3cd3ad7b77f7a477c06d53322e2" +checksum = "692664acfd98652de739a8acbb0a0d670f1d67190a49be6b4395e22c37337d89" dependencies = [ "asynchronous-codec", "bytes", @@ -3676,16 +3567,16 @@ dependencies = [ "log", "nohash-hasher", "parking_lot 0.12.1", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "unsigned-varint", ] [[package]] name = "libp2p-noise" -version = "0.37.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762408cb5d84b49a600422d7f9a42c18012d8da6ebcd570f9a4a4290ba41fb6f" +checksum = "048155686bd81fe6cb5efdef0c6290f25ad32a0a42e8f4f72625cf6a505a206f" dependencies = [ "bytes", "curve25519-dalek 3.2.0", @@ -3693,8 +3584,8 @@ dependencies = [ "lazy_static", "libp2p-core", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3705,9 +3596,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.37.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "100a6934ae1dbf8a693a4e7dd1d730fd60b774dafc45688ed63b554497c6c925" +checksum = "7228b9318d34689521349a86eb39a3c3a802c9efc99a0568062ffb80913e3f91" dependencies = [ "futures", "futures-timer", @@ -3715,95 +3606,15 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand 0.7.3", - "void", -] - -[[package]] -name = "libp2p-plaintext" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be27bf0820a6238a4e06365b096d428271cce85a129cf16f2fe9eb1610c4df86" -dependencies = [ - "asynchronous-codec", - "bytes", - "futures", - "libp2p-core", - "log", - "prost 0.10.3", - "prost-build 0.10.4", - "unsigned-varint", - "void", -] - -[[package]] -name = "libp2p-pnet" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" -dependencies = [ - "futures", - "log", - "pin-project", - "rand 0.7.3", - "salsa20", - "sha3 0.9.1", -] - -[[package]] -name = "libp2p-relay" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4931547ee0cce03971ccc1733ff05bb0c4349fd89120a39e9861e2bbe18843c3" -dependencies = [ - "asynchronous-codec", - "bytes", - "either", - "futures", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-swarm", - "log", - "pin-project", - "prost 0.10.3", - "prost-build 0.10.4", - "prost-codec", - "rand 0.8.5", - "smallvec", - "static_assertions", - "thiserror", - "void", -] - -[[package]] -name = "libp2p-rendezvous" -version = 
"0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9511c9672ba33284838e349623319c8cad2d18cfad243ae46c6b7e8a2982ea4e" -dependencies = [ - "asynchronous-codec", - "bimap", - "futures", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-swarm", - "log", - "prost 0.10.3", - "prost-build 0.10.4", "rand 0.8.5", - "sha2 0.10.2", - "thiserror", - "unsigned-varint", "void", ] [[package]] name = "libp2p-request-response" -version = "0.19.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "508a189e2795d892c8f5c1fa1e9e0b1845d32d7b0b249dbf7b05b18811361843" +checksum = "8827af16a017b65311a410bb626205a9ad92ec0473967618425039fa5231adc1" dependencies = [ "async-trait", "bytes", @@ -3812,16 +3623,16 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "unsigned-varint", ] [[package]] name = "libp2p-swarm" -version = "0.37.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ac5be6c2de2d1ff3f7693fda6faf8a827b1f3e808202277783fea9f527d114" +checksum = "46d13df7c37807965d82930c0e4b04a659efcb6cca237373b206043db5398ecf" dependencies = [ "either", "fnv", @@ -3831,7 +3642,7 @@ dependencies = [ "libp2p-core", "log", "pin-project", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "thiserror", "void", @@ -3839,48 +3650,36 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f54a64b6957249e0ce782f8abf41d97f69330d02bf229f0672d864f0650cc76" +checksum = "a0eddc4497a8b5a506013c40e8189864f9c3a00db2b25671f428ae9007f3ba32" dependencies = [ + "heck", "quote", "syn", ] [[package]] name = "libp2p-tcp" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6771dc19aa3c65d6af9a8c65222bfc8fcd446630ddca487acd161fa6096f3b" +checksum = "9839d96761491c6d3e238e70554b856956fca0ab60feb9de2cd08eed4473fa92" dependencies = [ "async-io", "futures", "futures-timer", "if-watch", - "ipnet", "libc", "libp2p-core", "log", "socket2", ] -[[package]] -name = "libp2p-uds" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d125e3e5f0d58f3c6ac21815b20cf4b6a88b8db9dc26368ea821838f4161fd4d" -dependencies = [ - "async-std", - "futures", - "libp2p-core", - "log", -] - [[package]] name = "libp2p-wasm-ext" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec894790eec3c1608f8d1a8a0bdf0dbeb79ed4de2dce964222011c2896dfa05a" +checksum = "a17b5b8e7a73e379e47b1b77f8a82c4721e97eca01abcd18e9cd91a23ca6ce97" dependencies = [ "futures", "js-sys", @@ -3892,9 +3691,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.36.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9808e57e81be76ff841c106b4c5974fb4d41a233a7bdd2afbf1687ac6def3818" +checksum = "3758ae6f89b2531a24b6d9f5776bda6a626b60a57600d7185d43dfa75ca5ecc4" dependencies = [ "either", "futures", @@ -3911,12 +3710,13 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6dea686217a06072033dc025631932810e2f6ad784e4fafa42e27d311c7a81c" +checksum = "30f079097a21ad017fc8139460630286f02488c8c13b26affb46623aa20d8845" dependencies = [ "futures", 
"libp2p-core", + "log", "parking_lot 0.12.1", "thiserror", "yamux", @@ -4164,16 +3964,6 @@ dependencies = [ "rustix", ] -[[package]] -name = "memmap" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "memmap2" version = "0.2.3" @@ -4295,6 +4085,33 @@ dependencies = [ "winapi", ] +[[package]] +name = "mockall" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" +dependencies = [ + "cfg-if 1.0.0", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "multiaddr" version = "0.14.0" @@ -4337,7 +4154,7 @@ dependencies = [ "digest 0.10.3", "multihash-derive", "sha2 0.10.2", - "sha3 0.10.0", + "sha3", "unsigned-varint", ] @@ -4363,9 +4180,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" +checksum = "9bc41247ec209813e2fd414d6e16b9d94297dacf3cd613fa6ef09cd4d9755c10" dependencies = [ "bytes", "futures", @@ -4451,9 +4268,9 @@ dependencies = [ [[package]] name = "netlink-packet-route" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733ea73609acfd7fa7ddadfb7bf709b0471668c456ad9513685af543a06342b2" +checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", "bitflags", @@ -4477,23 +4294,24 @@ dependencies = [ [[package]] name = "netlink-proto" -version = "0.9.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8785b8141e8432aa45fceb922a7e876d7da3fad37fa7e7ec702ace3aa0826b" +checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", + "thiserror", "tokio", ] [[package]] name = "netlink-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c9f9547a08241bee7b6558b9b98e1f290d187de8b7cfca2bbb4937bcaa8f8" +checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" dependencies = [ "async-io", "bytes", @@ -4502,19 +4320,6 @@ dependencies = [ "log", ] -[[package]] -name = "nix" -version = "0.22.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4916f159ed8e5de0082076562152a76b7a1f64a01fd9d1e0fea002c37624faf" -dependencies = [ - "bitflags", - "cc", - "cfg-if 1.0.0", - "libc", - "memoffset", -] - [[package]] name = "nix" version = "0.23.1" @@ -4563,6 +4368,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "ntapi" version = 
"0.3.6" @@ -4795,15 +4606,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "owo-colors" version = "3.2.0" @@ -4813,7 +4615,7 @@ checksum = "20448fd678ec04e6ea15bbe0476874af65e98a01515d667aa49f1434dc44ebf4" [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -4827,7 +4629,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -4843,7 +4645,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -4858,7 +4660,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -4882,7 +4684,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4902,7 +4704,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-election-provider-support", "frame-support", @@ -4921,7 +4723,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -4936,7 +4738,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "beefy-primitives", "frame-support", @@ -4952,7 +4754,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "beefy-merkle-tree", @@ -4975,7 +4777,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -4993,7 +4795,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5012,7 +4814,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5029,7 +4831,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5046,7 +4848,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5064,7 +4866,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5088,7 +4890,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5101,7 +4903,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5119,7 +4921,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5140,7 +4942,7 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5155,7 +4957,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5178,7 +4980,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5194,7 +4996,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5214,7 +5016,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5231,7 +5033,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5248,7 +5050,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", @@ -5266,7 +5068,7 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -5281,7 +5083,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = 
"git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5297,7 +5099,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -5314,7 +5116,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5334,7 +5136,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "sp-api", @@ -5344,7 +5146,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -5361,7 +5163,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5384,7 +5186,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5401,7 +5203,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5416,7 +5218,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5434,7 +5236,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = 
"git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5449,7 +5251,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5467,7 +5269,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5483,7 +5285,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -5504,7 +5306,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5520,7 +5322,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -5534,7 +5336,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5557,7 +5359,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5568,7 +5370,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "sp-arithmetic", @@ -5577,7 +5379,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ 
-5591,7 +5393,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5609,7 +5411,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5628,7 +5430,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-support", "frame-system", @@ -5644,7 +5446,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5659,7 +5461,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5670,7 +5472,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5687,7 +5489,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5703,7 +5505,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5718,7 +5520,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6087,7 +5889,7 @@ dependencies = [ "parity-util-mem", "polkadot-cli", 
"polkadot-core-primitives", - "remote-externalities", + "substrate-rpc-client", "tempfile", "tokio", ] @@ -6134,6 +5936,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "sp-application-crypto", + "sp-authority-discovery", "sp-core", "sp-keyring", "sp-keystore", @@ -7469,6 +7272,7 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", + "rand 0.8.5", "sp-core", "sp-keystore", "tracing-gum", @@ -7675,8 +7479,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95e5a7689e456ab905c22c2b48225bb921aba7c8dfa58440d68ba13f6222a715" dependencies = [ "difflib", + "float-cmp", "itertools", + "normalize-line-endings", "predicates-core", + "regex", ] [[package]] @@ -7795,21 +7602,21 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" +checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", "itoa 1.0.1", - "owning_ref", + "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] [[package]] name = "prometheus-client-derive-text-encode" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" +checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", @@ -7827,16 +7634,6 @@ dependencies = [ "regex", ] -[[package]] -name = "prost" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" -dependencies = [ - "bytes", - "prost-derive 0.10.1", -] - [[package]] name = "prost" version = "0.11.0" @@ -7844,29 +7641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.11.0", -] - -[[package]] -name = "prost-build" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" -dependencies = [ - "bytes", - "cfg-if 1.0.0", - "cmake", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost 0.10.3", - "prost-types 0.10.1", - "regex", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -7882,8 +7657,8 @@ dependencies = [ "log", "multimap", "petgraph", - "prost 0.11.0", - "prost-types 0.11.1", + "prost", + "prost-types", "regex", "tempfile", "which", @@ -7891,30 +7666,17 @@ dependencies = [ [[package]] name = "prost-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.3", + "prost", "thiserror", "unsigned-varint", ] -[[package]] -name = "prost-derive" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.11.0" @@ 
-7928,16 +7690,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" -dependencies = [ - "bytes", - "prost 0.10.3", -] - [[package]] name = "prost-types" version = "0.11.1" @@ -7945,7 +7697,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.11.0", + "prost", ] [[package]] @@ -8251,10 +8003,9 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "env_logger 0.9.0", - "jsonrpsee", "log", "parity-scale-codec", "serde", @@ -8263,6 +8014,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-version", + "substrate-rpc-client", ] [[package]] @@ -8468,16 +8220,16 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f54290e54521dac3de4149d83ddf9f62a359b3cc93bcb494a794a41e6f4744b" +checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" dependencies = [ "async-global-executor", "futures", "log", "netlink-packet-route", "netlink-proto", - "nix 0.22.3", + "nix 0.24.1", "thiserror", ] @@ -8578,15 +8330,6 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568" -[[package]] -name = "salsa20" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0fbb5f676da676c260ba276a8f43a8dc67cf02d1438423aeb1c677a7212686" -dependencies = [ - "cipher", -] - [[package]] name = "same-file" version = "1.0.6" @@ -8599,7 +8342,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "sp-core", @@ -8610,7 +8353,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "futures", @@ -8619,8 +8362,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "rand 0.7.3", "sc-client-api", "sc-network-common", @@ -8637,7 +8380,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "futures-timer", @@ -8660,7 +8403,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -8676,7 +8419,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "impl-trait-for-tuples", "memmap2 0.5.0", @@ -8693,7 +8436,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8704,7 +8447,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "chrono", @@ -8744,7 +8487,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "fnv", "futures", @@ -8772,7 +8515,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "hash-db", "kvdb", @@ -8797,7 +8540,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "futures", @@ -8821,7 +8564,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "fork-tree", @@ -8863,7 +8606,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "jsonrpsee", @@ -8885,7 +8628,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "fork-tree", 
"parity-scale-codec", @@ -8898,7 +8641,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "futures", @@ -8922,7 +8665,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "lazy_static", "lru 0.7.8", @@ -8949,7 +8692,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "environmental", "parity-scale-codec", @@ -8965,7 +8708,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "parity-scale-codec", @@ -8980,7 +8723,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "cfg-if 1.0.0", "libc", @@ -9000,7 +8743,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ahash", "array-bytes", @@ -9041,7 +8784,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "finality-grandpa", "futures", @@ -9062,7 +8805,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ansi_term", "futures", @@ -9079,7 +8822,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "async-trait", @@ -9094,7 +8837,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = 
"git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "async-trait", @@ -9116,7 +8859,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "pin-project", - "prost 0.11.0", + "prost", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -9141,14 +8884,14 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "cid", "futures", "libp2p", "log", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "sc-client-api", "sc-network-common", "sp-blockchain", @@ -9161,7 +8904,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "bitflags", @@ -9171,7 +8914,7 @@ dependencies = [ "libp2p", "linked_hash_set", "parity-scale-codec", - "prost-build 0.11.1", + "prost-build", "sc-consensus", "sc-peerset", "serde", @@ -9187,7 +8930,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ahash", "futures", @@ -9205,15 +8948,15 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "futures", "libp2p", "log", "parity-scale-codec", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "sc-client-api", "sc-network-common", "sc-peerset", @@ -9226,7 +8969,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "fork-tree", @@ -9234,13 +8977,15 @@ dependencies = [ "libp2p", "log", "lru 0.7.8", + "mockall", "parity-scale-codec", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "sc-client-api", "sc-consensus", "sc-network-common", "sc-peerset", + "sc-utils", "smallvec", "sp-arithmetic", "sp-blockchain", @@ -9254,7 +8999,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "futures", @@ -9273,7 +9018,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = 
"git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "bytes", @@ -9303,7 +9048,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "libp2p", @@ -9316,7 +9061,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9325,7 +9070,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "hash-db", @@ -9355,7 +9100,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "jsonrpsee", @@ -9378,7 +9123,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "jsonrpsee", @@ -9388,10 +9133,29 @@ dependencies = [ "tokio", ] +[[package]] +name = "sc-rpc-spec-v2" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" +dependencies = [ + "futures", + "hex", + "jsonrpsee", + "parity-scale-codec", + "sc-chain-spec", + "sc-transaction-pool-api", + "serde", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-runtime", + "thiserror", +] + [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "directories", @@ -9423,6 +9187,7 @@ dependencies = [ "sc-offchain", "sc-rpc", "sc-rpc-server", + "sc-rpc-spec-v2", "sc-sysinfo", "sc-telemetry", "sc-tracing", @@ -9461,7 +9226,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "parity-scale-codec", @@ -9475,7 +9240,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies 
= [ "jsonrpsee", "parity-scale-codec", @@ -9494,7 +9259,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "libc", @@ -9513,7 +9278,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "chrono", "futures", @@ -9531,7 +9296,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ansi_term", "atty", @@ -9562,7 +9327,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9573,8 +9338,9 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ + "async-trait", "futures", "futures-timer", "linked-hash-map", @@ -9599,8 +9365,9 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ + "async-trait", "futures", "log", "serde", @@ -9612,7 +9379,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "futures-timer", @@ -9918,18 +9685,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - [[package]] name = "sha3" version = "0.10.0" @@ -10098,7 +9853,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "hash-db", "log", @@ -10116,7 +9871,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "blake2", "proc-macro-crate", @@ -10128,7 +9883,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10141,7 +9896,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "integer-sqrt", "num-traits", @@ -10156,7 +9911,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10169,7 +9924,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "parity-scale-codec", @@ -10181,7 +9936,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "sp-api", @@ -10193,7 +9948,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "log", @@ -10211,7 +9966,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "futures", @@ -10230,7 +9985,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "merlin", @@ -10253,7 +10008,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ 
"parity-scale-codec", "scale-info", @@ -10267,7 +10022,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10280,7 +10035,7 @@ dependencies = [ [[package]] name = "sp-core" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "base58", @@ -10326,13 +10081,13 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "blake2", "byteorder", "digest 0.10.3", "sha2 0.10.2", - "sha3 0.10.0", + "sha3", "sp-std", "twox-hash", ] @@ -10340,7 +10095,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro2", "quote", @@ -10351,7 +10106,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10360,7 +10115,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro2", "quote", @@ -10370,7 +10125,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "environmental", "parity-scale-codec", @@ -10381,7 +10136,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "finality-grandpa", "log", @@ -10399,7 +10154,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10413,7 +10168,7 @@ dependencies = [ [[package]] name = "sp-io" version = "6.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "bytes", "futures", @@ -10439,7 +10194,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "lazy_static", "sp-core", @@ -10450,7 +10205,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "futures", @@ -10467,7 +10222,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "thiserror", "zstd", @@ -10476,7 +10231,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "parity-scale-codec", @@ -10492,7 +10247,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10506,7 +10261,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "sp-api", "sp-core", @@ -10516,7 +10271,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "backtrace", "lazy_static", @@ -10526,7 +10281,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "rustc-hash", "serde", @@ -10536,7 +10291,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "either", "hash256-std-hasher", @@ -10559,7 +10314,7 @@ dependencies = [ 
[[package]] name = "sp-runtime-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -10577,7 +10332,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "Inflector", "proc-macro-crate", @@ -10589,7 +10344,7 @@ dependencies = [ [[package]] name = "sp-sandbox" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "parity-scale-codec", @@ -10603,7 +10358,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10617,7 +10372,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "scale-info", @@ -10628,7 +10383,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "hash-db", "log", @@ -10650,12 +10405,12 @@ dependencies = [ [[package]] name = "sp-std" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" [[package]] name = "sp-storage" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10668,7 +10423,7 @@ dependencies = [ [[package]] name = "sp-tasks" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "log", "sp-core", @@ -10681,7 +10436,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "futures-timer", @@ -10697,7 +10452,7 @@ 
dependencies = [ [[package]] name = "sp-tracing" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "sp-std", @@ -10709,7 +10464,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "sp-api", "sp-runtime", @@ -10718,7 +10473,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "async-trait", "log", @@ -10734,7 +10489,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ahash", "hash-db", @@ -10757,7 +10512,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10774,7 +10529,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -10785,7 +10540,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "impl-trait-for-tuples", "log", @@ -10798,7 +10553,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11013,7 +10768,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "platforms", ] @@ -11021,7 +10776,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = 
"git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11042,7 +10797,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures-util", "hyper", @@ -11052,10 +10807,23 @@ dependencies = [ "tokio", ] +[[package]] +name = "substrate-rpc-client" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" +dependencies = [ + "async-trait", + "jsonrpsee", + "log", + "sc-rpc-api", + "serde", + "sp-runtime", +] + [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "jsonrpsee", "log", @@ -11076,7 +10844,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "array-bytes", "async-trait", @@ -11102,7 +10870,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11112,7 +10880,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11123,7 +10891,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "ansi_term", "build-helper", @@ -11368,12 +11136,6 @@ dependencies = [ "sp-runtime", ] -[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - [[package]] name = "thiserror" version = "1.0.31" @@ -11786,9 +11548,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -11800,30 +11562,30 @@ dependencies = [ "idna", "ipnet", "lazy_static", - "log", "rand 0.8.5", "smallvec", "thiserror", 
"tinyvec", + "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" +checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ "cfg-if 1.0.0", "futures-util", "ipconfig", "lazy_static", - "log", "lru-cache", "parking_lot 0.12.1", "resolv-conf", "smallvec", "thiserror", + "tracing", "trust-dns-proto", ] @@ -11836,11 +11598,10 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#0c1ccdaa53556a106aa69c23f19527e435970237" +source = "git+https://github.com/paritytech/substrate?branch=master#f5ed51def33950ac24709eb11940790ca446421d" dependencies = [ "clap", "frame-try-runtime", - "jsonrpsee", "log", "parity-scale-codec", "remote-externalities", @@ -11856,6 +11617,8 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-version", + "sp-weights", + "substrate-rpc-client", "zstd", ] @@ -12632,15 +12395,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.29.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aac7fef12f4b59cd0a29339406cc9203ab44e440ddff6b3f5a41455349fa9cf3" +checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" dependencies = [ - "windows_aarch64_msvc 0.29.0", - "windows_i686_gnu 0.29.0", - "windows_i686_msvc 0.29.0", - "windows_x86_64_gnu 0.29.0", - "windows_x86_64_msvc 0.29.0", + "windows_aarch64_msvc 0.34.0", + "windows_i686_gnu 0.34.0", + "windows_i686_msvc 0.34.0", + "windows_x86_64_gnu 0.34.0", + "windows_x86_64_msvc 0.34.0", ] [[package]] @@ -12671,15 +12434,15 @@ dependencies = [ [[package]] name = "windows_aarch64_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d027175d00b01e0cbeb97d6ab6ebe03b12330a35786cbaca5252b1c4bf5d9b" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" [[package]] name = "windows_aarch64_msvc" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" @@ -12689,15 +12452,15 @@ checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8793f59f7b8e8b01eda1a652b2697d87b93097198ae85f823b969ca5b89bba58" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" [[package]] name = "windows_i686_gnu" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" @@ -12707,15 +12470,15 @@ checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" -version = "0.29.0" +version = "0.32.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8602f6c418b67024be2996c512f5f995de3ba417f4c75af68401ab8756796ae4" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" [[package]] name = "windows_i686_msvc" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" @@ -12725,15 +12488,15 @@ checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d615f419543e0bd7d2b3323af0d86ff19cbc4f816e6453f36a2c2ce889c354" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" [[package]] name = "windows_x86_64_gnu" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" @@ -12743,15 +12506,15 @@ checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d95421d9ed3672c280884da53201a5c46b7b2765ca6faf34b0d71cf34a3561" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" [[package]] name = "windows_x86_64_msvc" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" diff --git a/Cargo.toml b/Cargo.toml index 56ddde48fd5f..b6f9cfc74b6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ assert_cmd = "2.0.4" nix = "0.24.1" tempfile = "3.2.0" tokio = "1.19.2" -remote-externalities = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-rpc-client = { git = "https://github.com/paritytech/substrate", branch = "master" } polkadot-core-primitives = { path = "core-primitives" } [workspace] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index eaacd22640da..87c2f6130986 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -14,7 +14,7 @@ wasm-opt = false crate-type = ["cdylib", "rlib"] [dependencies] -clap = { version = "3.1", features = ["derive"], optional = true } +clap = { version = "4.0.9", features = ["derive"], optional = true } log = "0.4.17" thiserror = "1.0.31" futures = "0.3.21" @@ -63,7 +63,7 @@ runtime-benchmarks = [ ] trie-memory-tracker = ["sp-trie/memory-tracker"] full-node = ["service/full-node"] -try-runtime = ["service/try-runtime"] +try-runtime = ["service/try-runtime", "try-runtime-cli/try-runtime"] fast-runtime = ["service/fast-runtime"] pyroscope = ["pyro"] hostperfcheck = ["polkadot-performance-test"] diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 804c4a9f81c6..a6b7f4a3d5c9 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -43,16 +43,16 @@ pub enum Subcommand { Revert(sc_cli::RevertCmd), #[allow(missing_docs)] - #[clap(name = "prepare-worker", hide = true)] + #[command(name = 
"prepare-worker", hide = true)] PvfPrepareWorker(ValidationWorkerCommand), #[allow(missing_docs)] - #[clap(name = "execute-worker", hide = true)] + #[command(name = "execute-worker", hide = true)] PvfExecuteWorker(ValidationWorkerCommand), /// Sub-commands concerned with benchmarking. /// The pallet benchmarking moved to the `pallet` sub-command. - #[clap(subcommand)] + #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), /// Runs performance checks such as PVF compilation in order to measure machine @@ -68,7 +68,7 @@ pub enum Subcommand { TryRuntime, /// Key management CLI utilities - #[clap(subcommand)] + #[command(subcommand)] Key(sc_cli::KeySubcommand), /// Db meta columns information. @@ -84,22 +84,22 @@ pub struct ValidationWorkerCommand { #[allow(missing_docs)] #[derive(Debug, Parser)] -#[cfg_attr(feature = "malus", derive(Clone))] +#[group(skip)] pub struct RunCmd { #[allow(missing_docs)] #[clap(flatten)] pub base: sc_cli::RunCmd, /// Force using Kusama native runtime. - #[clap(long = "force-kusama")] + #[arg(long = "force-kusama")] pub force_kusama: bool, /// Force using Westend native runtime. - #[clap(long = "force-westend")] + #[arg(long = "force-westend")] pub force_westend: bool, /// Force using Rococo native runtime. - #[clap(long = "force-rococo")] + #[arg(long = "force-rococo")] pub force_rococo: bool, /// Setup a GRANDPA scheduled voting pause. @@ -108,25 +108,25 @@ pub struct RunCmd { /// blocks). After the given block number is finalized the GRANDPA voter /// will temporarily stop voting for new blocks until the given delay has /// elapsed (i.e. until a block at height `pause_block + delay` is imported). - #[clap(long = "grandpa-pause", number_of_values(2))] + #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, /// Enable the BEEFY gadget (only on Rococo or Wococo for now). - #[clap(long)] + #[arg(long)] pub beefy: bool, /// Add the destination address to the jaeger agent. /// /// Must be valid socket address, of format `IP:Port` /// commonly `127.0.0.1:6831`. - #[clap(long)] + #[arg(long)] pub jaeger_agent: Option, /// Add the destination address to the `pyroscope` agent. /// /// Must be valid socket address, of format `IP:Port` /// commonly `127.0.0.1:4040`. - #[clap(long)] + #[arg(long)] pub pyroscope_server: Option, /// Disable automatic hardware benchmarks. @@ -136,20 +136,20 @@ pub struct RunCmd { /// /// The results are then printed out in the logs, and also sent as part of /// telemetry, if telemetry is enabled. - #[clap(long)] + #[arg(long)] pub no_hardware_benchmarks: bool, /// Overseer message capacity override. /// /// **Dangerous!** Do not touch unless explicitly adviced to. - #[clap(long)] + #[arg(long)] pub overseer_channel_capacity_override: Option, } #[allow(missing_docs)] #[derive(Debug, Parser)] pub struct Cli { - #[clap(subcommand)] + #[command(subcommand)] pub subcommand: Option, #[clap(flatten)] pub run: RunCmd, diff --git a/erasure-coding/fuzzer/Cargo.toml b/erasure-coding/fuzzer/Cargo.toml index efabd7a77bca..bd8632a185f5 100644 --- a/erasure-coding/fuzzer/Cargo.toml +++ b/erasure-coding/fuzzer/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true [dependencies] polkadot-erasure-coding = { path = ".." 
} honggfuzz = "0.5" +polkadot-primitives = { path = "../../primitives" } primitives = { package = "polkadot-node-primitives", path = "../../node/primitives/" } [[bin]] diff --git a/erasure-coding/fuzzer/src/round_trip.rs b/erasure-coding/fuzzer/src/round_trip.rs index 3d5ea5927458..daa57a566fbb 100644 --- a/erasure-coding/fuzzer/src/round_trip.rs +++ b/erasure-coding/fuzzer/src/round_trip.rs @@ -2,6 +2,7 @@ use polkadot_erasure_coding::*; use primitives::{AvailableData, BlockData, PoV}; use std::sync::Arc; use honggfuzz::fuzz; +use polkadot_primitives::v2::PersistedValidationData; fn main() { diff --git a/node/client/src/lib.rs b/node/client/src/lib.rs index 41b0048f0e46..a254973b99c2 100644 --- a/node/client/src/lib.rs +++ b/node/client/src/lib.rs @@ -128,7 +128,7 @@ pub trait RuntimeApiCollection: + ParachainHost + sp_block_builder::BlockBuilder + frame_system_rpc_runtime_api::AccountNonceApi - + sp_mmr_primitives::MmrApi::Hash> + + sp_mmr_primitives::MmrApi::Hash, BlockNumber> + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + sp_api::Metadata + sp_offchain::OffchainWorkerApi @@ -149,7 +149,7 @@ where + ParachainHost + sp_block_builder::BlockBuilder + frame_system_rpc_runtime_api::AccountNonceApi - + sp_mmr_primitives::MmrApi::Hash> + + sp_mmr_primitives::MmrApi::Hash, BlockNumber> + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + sp_api::Metadata + sp_offchain::OffchainWorkerApi @@ -421,63 +421,63 @@ impl sc_client_api::BlockBackend for Client { impl sc_client_api::StorageProvider for Client { fn storage( &self, - id: &BlockId, + hash: &::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { with_client! { self, client, { - client.storage(id, key) + client.storage(hash, key) } } } fn storage_keys( &self, - id: &BlockId, + hash: &::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { with_client! { self, client, { - client.storage_keys(id, key_prefix) + client.storage_keys(hash, key_prefix) } } } fn storage_hash( &self, - id: &BlockId, + hash: &::Hash, key: &StorageKey, ) -> sp_blockchain::Result::Hash>> { with_client! { self, client, { - client.storage_hash(id, key) + client.storage_hash(hash, key) } } } fn storage_pairs( &self, - id: &BlockId, + hash: &::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { with_client! 
{ self, client, { - client.storage_pairs(id, key_prefix) + client.storage_pairs(hash, key_prefix) } } } fn storage_keys_iter<'a>( &self, - id: &BlockId, + hash: &::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result< @@ -487,14 +487,14 @@ impl sc_client_api::StorageProvider for Client { self, client, { - client.storage_keys_iter(id, prefix, start_key) + client.storage_keys_iter(hash, prefix, start_key) } } } fn child_storage( &self, - id: &BlockId, + hash: &::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { @@ -502,14 +502,14 @@ impl sc_client_api::StorageProvider for Client { self, client, { - client.child_storage(id, child_info, key) + client.child_storage(hash, child_info, key) } } } fn child_storage_keys( &self, - id: &BlockId, + hash: &::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { @@ -517,14 +517,14 @@ impl sc_client_api::StorageProvider for Client { self, client, { - client.child_storage_keys(id, child_info, key_prefix) + client.child_storage_keys(hash, child_info, key_prefix) } } } fn child_storage_keys_iter<'a>( &self, - id: &BlockId, + hash: &::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, @@ -535,14 +535,14 @@ impl sc_client_api::StorageProvider for Client { self, client, { - client.child_storage_keys_iter(id, child_info, prefix, start_key) + client.child_storage_keys_iter(hash, child_info, prefix, start_key) } } } fn child_storage_hash( &self, - id: &BlockId, + hash: &::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result::Hash>> { @@ -550,7 +550,7 @@ impl sc_client_api::StorageProvider for Client { self, client, { - client.child_storage_hash(id, child_info, key) + client.child_storage_hash(hash, child_info, key) } } } diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index 464f8d322648..6670ea48d4ec 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -38,6 +38,16 @@ use std::{ time::{Duration, SystemTime}, }; +/// The time period after which the precheck preparation worker is considered unresponsive and will +/// be killed. +// NOTE: If you change this make sure to fix the buckets of `pvf_preparation_time` metric. +pub const PRECHECK_COMPILATION_TIMEOUT: Duration = Duration::from_secs(60); + +/// The time period after which the execute preparation worker is considered unresponsive and will +/// be killed. +// NOTE: If you change this make sure to fix the buckets of `pvf_preparation_time` metric. +pub const EXECUTE_COMPILATION_TIMEOUT: Duration = Duration::from_secs(180); + /// An alias to not spell the type for the oneshot sender for the PVF execution result. pub(crate) type ResultSender = oneshot::Sender>; @@ -51,10 +61,11 @@ pub struct ValidationHost { } impl ValidationHost { - /// Precheck PVF with the given code, i.e. verify that it compiles within a reasonable time limit. - /// The result of execution will be sent to the provided result sender. + /// Precheck PVF with the given code, i.e. verify that it compiles within a reasonable time + /// limit. This will prepare the PVF. The result of preparation will be sent to the provided + /// result sender. /// - /// This is async to accommodate the fact a possibility of back-pressure. In the vast majority of + /// This is async to accommodate the possibility of back-pressure. In the vast majority of /// situations this function should return immediately. 
/// /// Returns an error if the request cannot be sent to the validation host, i.e. if it shut down. @@ -72,7 +83,7 @@ impl ValidationHost { /// Execute PVF with the given code, execution timeout, parameters and priority. /// The result of execution will be sent to the provided result sender. /// - /// This is async to accommodate the fact a possibility of back-pressure. In the vast majority of + /// This is async to accommodate the possibility of back-pressure. In the vast majority of /// situations this function should return immediately. /// /// Returns an error if the request cannot be sent to the validation host, i.e. if it shut down. @@ -92,7 +103,7 @@ impl ValidationHost { /// Sends a signal to the validation host requesting to prepare a list of the given PVFs. /// - /// This is async to accommodate the fact a possibility of back-pressure. In the vast majority of + /// This is async to accommodate the possibility of back-pressure. In the vast majority of /// situations this function should return immediately. /// /// Returns an error if the request cannot be sent to the validation host, i.e. if it shut down. @@ -418,6 +429,9 @@ async fn handle_to_host( Ok(()) } +/// Handles PVF prechecking. +/// +/// This tries to prepare the PVF by compiling the WASM blob within a given timeout ([`PRECHECK_COMPILATION_TIMEOUT`]). async fn handle_precheck_pvf( artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, @@ -440,12 +454,24 @@ async fn handle_precheck_pvf( } } else { artifacts.insert_preparing(artifact_id, vec![result_sender]); - send_prepare(prepare_queue, prepare::ToQueue::Enqueue { priority: Priority::Normal, pvf }) - .await?; + send_prepare( + prepare_queue, + prepare::ToQueue::Enqueue { + priority: Priority::Normal, + pvf, + compilation_timeout: PRECHECK_COMPILATION_TIMEOUT, + }, + ) + .await?; } Ok(()) } +/// Handles PVF execution. +/// +/// This will first try to prepare the PVF, if a prepared artifact does not already exist. If there is already a +/// preparation job, we coalesce the two preparation jobs. When preparing for execution, we use a more lenient timeout +/// ([`EXECUTE_COMPILATION_TIMEOUT`]) than when prechecking. async fn handle_execute_pvf( cache_path: &Path, artifacts: &mut Artifacts, @@ -462,7 +488,7 @@ async fn handle_execute_pvf( if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { ref mut last_time_needed } => { + ArtifactState::Prepared { last_time_needed } => { *last_time_needed = SystemTime::now(); send_execute( @@ -485,9 +511,17 @@ async fn handle_execute_pvf( } } else { // Artifact is unknown: register it and enqueue a job with the corresponding priority and - // + // PVF. 
artifacts.insert_preparing(artifact_id.clone(), Vec::new()); - send_prepare(prepare_queue, prepare::ToQueue::Enqueue { priority, pvf }).await?; + send_prepare( + prepare_queue, + prepare::ToQueue::Enqueue { + priority, + pvf, + compilation_timeout: EXECUTE_COMPILATION_TIMEOUT, + }, + ) + .await?; awaiting_prepare.add(artifact_id, execution_timeout, params, result_tx); } @@ -520,7 +554,11 @@ async fn handle_heads_up( send_prepare( prepare_queue, - prepare::ToQueue::Enqueue { priority: Priority::Normal, pvf: active_pvf }, + prepare::ToQueue::Enqueue { + priority: Priority::Normal, + pvf: active_pvf, + compilation_timeout: EXECUTE_COMPILATION_TIMEOUT, + }, ) .await?; } diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs index df0c619989f2..547ee65f3e9d 100644 --- a/node/core/pvf/src/metrics.rs +++ b/node/core/pvf/src/metrics.rs @@ -155,7 +155,8 @@ impl metrics::Metrics for Metrics { "Time spent in preparing PVF artifacts in seconds", ) .buckets(vec![ - // This is synchronized with COMPILATION_TIMEOUT=60s constant found in + // This is synchronized with the PRECHECK_COMPILATION_TIMEOUT=60s + // and EXECUTE_COMPILATION_TIMEOUT=180s constants found in // src/prepare/worker.rs 0.1, 0.5, @@ -166,6 +167,7 @@ impl metrics::Metrics for Metrics { 20.0, 30.0, 60.0, + 180.0, ]), )?, registry, diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs index 4902c4c7e3b3..fad6ed167614 100644 --- a/node/core/pvf/src/prepare/pool.rs +++ b/node/core/pvf/src/prepare/pool.rs @@ -61,7 +61,12 @@ pub enum ToPool { /// /// In either case, the worker is considered busy and no further `StartWork` messages should be /// sent until either `Concluded` or `Rip` message is received. - StartWork { worker: Worker, code: Arc>, artifact_path: PathBuf }, + StartWork { + worker: Worker, + code: Arc>, + artifact_path: PathBuf, + compilation_timeout: Duration, + }, } /// A message sent from pool to its client. @@ -205,7 +210,7 @@ fn handle_to_pool( metrics.prepare_worker().on_begin_spawn(); mux.push(spawn_worker_task(program_path.to_owned(), spawn_timeout).boxed()); }, - ToPool::StartWork { worker, code, artifact_path } => { + ToPool::StartWork { worker, code, artifact_path, compilation_timeout } => { if let Some(data) = spawned.get_mut(worker) { if let Some(idle) = data.idle.take() { let preparation_timer = metrics.time_preparation(); @@ -216,6 +221,7 @@ fn handle_to_pool( code, cache_path.to_owned(), artifact_path, + compilation_timeout, preparation_timer, ) .boxed(), @@ -263,9 +269,11 @@ async fn start_work_task( code: Arc>, cache_path: PathBuf, artifact_path: PathBuf, + compilation_timeout: Duration, _preparation_timer: Option, ) -> PoolEvent { - let outcome = worker::start_work(idle, code, &cache_path, artifact_path).await; + let outcome = + worker::start_work(idle, code, &cache_path, artifact_path, compilation_timeout).await; PoolEvent::StartWork(worker, outcome) } diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index 5aa1402916d6..a77b88e00345 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -21,7 +21,10 @@ use crate::{artifacts::ArtifactId, metrics::Metrics, PrepareResult, Priority, Pv use always_assert::{always, never}; use async_std::path::PathBuf; use futures::{channel::mpsc, stream::StreamExt as _, Future, SinkExt}; -use std::collections::{HashMap, VecDeque}; +use std::{ + collections::{HashMap, VecDeque}, + time::Duration, +}; /// A request to pool. 
#[derive(Debug)] @@ -30,7 +33,7 @@ pub enum ToQueue { /// /// Note that it is incorrect to enqueue the same PVF again without first receiving the /// [`FromQueue`] response. - Enqueue { priority: Priority, pvf: Pvf }, + Enqueue { priority: Priority, pvf: Pvf, compilation_timeout: Duration }, } /// A response from queue. @@ -76,6 +79,8 @@ struct JobData { /// The priority of this job. Can be bumped. priority: Priority, pvf: Pvf, + /// The timeout for the preparation job. + compilation_timeout: Duration, worker: Option, } @@ -91,7 +96,7 @@ impl WorkerData { } /// A queue structured like this is prone to starving, however, we don't care that much since we expect -/// there is going to be a limited number of critical jobs and we don't really care if background starve. +/// there is going to be a limited number of critical jobs and we don't really care if background starve. #[derive(Default)] struct Unscheduled { normal: VecDeque, @@ -203,18 +208,24 @@ impl Queue { async fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) -> Result<(), Fatal> { match to_queue { - ToQueue::Enqueue { priority, pvf } => { - handle_enqueue(queue, priority, pvf).await?; + ToQueue::Enqueue { priority, pvf, compilation_timeout } => { + handle_enqueue(queue, priority, pvf, compilation_timeout).await?; }, } Ok(()) } -async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Result<(), Fatal> { +async fn handle_enqueue( + queue: &mut Queue, + priority: Priority, + pvf: Pvf, + compilation_timeout: Duration, +) -> Result<(), Fatal> { gum::debug!( target: LOG_TARGET, validation_code_hash = ?pvf.code_hash, ?priority, + ?compilation_timeout, "PVF is enqueued for preparation.", ); queue.metrics.prepare_enqueued(); @@ -225,7 +236,7 @@ async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Resu "second Enqueue sent for a known artifact" ) { // This function is called in response to a `Enqueue` message; - // Precondtion for `Enqueue` is that it is sent only once for a PVF; + // Precondition for `Enqueue` is that it is sent only once for a PVF; // Thus this should always be `false`; // qed. 
gum::warn!( @@ -236,7 +247,7 @@ async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Resu return Ok(()) } - let job = queue.jobs.insert(JobData { priority, pvf, worker: None }); + let job = queue.jobs.insert(JobData { priority, pvf, compilation_timeout, worker: None }); queue.artifact_id_to_job.insert(artifact_id, job); if let Some(available) = find_idle_worker(queue) { @@ -424,7 +435,12 @@ async fn assign(queue: &mut Queue, worker: Worker, job: Job) -> Result<(), Fatal send_pool( &mut queue.to_pool_tx, - pool::ToPool::StartWork { worker, code: job_data.pvf.code.clone(), artifact_path }, + pool::ToPool::StartWork { + worker, + code: job_data.pvf.code.clone(), + artifact_path, + compilation_timeout: job_data.compilation_timeout, + }, ) .await?; @@ -478,7 +494,7 @@ pub fn start( #[cfg(test)] mod tests { use super::*; - use crate::error::PrepareError; + use crate::{error::PrepareError, host::PRECHECK_COMPILATION_TIMEOUT}; use assert_matches::assert_matches; use futures::{future::BoxFuture, FutureExt}; use slotmap::SlotMap; @@ -571,7 +587,6 @@ mod tests { async fn poll_ensure_to_pool_is_empty(&mut self) { use futures_timer::Delay; - use std::time::Duration; let to_pool_rx = &mut self.to_pool_rx; run_until( @@ -594,7 +609,11 @@ mod tests { async fn properly_concludes() { let mut test = Test::new(2, 2); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(1) }); + test.send_queue(ToQueue::Enqueue { + priority: Priority::Normal, + pvf: pvf(1), + compilation_timeout: PRECHECK_COMPILATION_TIMEOUT, + }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); let w = test.workers.insert(()); @@ -607,10 +626,12 @@ mod tests { #[async_std::test] async fn dont_spawn_over_soft_limit_unless_critical() { let mut test = Test::new(2, 3); + let compilation_timeout = PRECHECK_COMPILATION_TIMEOUT; - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(1) }); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(2) }); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(3) }); + let priority = Priority::Normal; + test.send_queue(ToQueue::Enqueue { priority, pvf: pvf(1), compilation_timeout }); + test.send_queue(ToQueue::Enqueue { priority, pvf: pvf(2), compilation_timeout }); + test.send_queue(ToQueue::Enqueue { priority, pvf: pvf(3), compilation_timeout }); // Receive only two spawns. assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); @@ -631,7 +652,11 @@ mod tests { assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); // Enqueue a critical job. - test.send_queue(ToQueue::Enqueue { priority: Priority::Critical, pvf: pvf(4) }); + test.send_queue(ToQueue::Enqueue { + priority: Priority::Critical, + pvf: pvf(4), + compilation_timeout, + }); // 2 out of 2 are working, but there is a critical job incoming. That means that spawning // another worker is warranted. @@ -641,15 +666,24 @@ mod tests { #[async_std::test] async fn cull_unwanted() { let mut test = Test::new(1, 2); + let compilation_timeout = PRECHECK_COMPILATION_TIMEOUT; - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(1) }); + test.send_queue(ToQueue::Enqueue { + priority: Priority::Normal, + pvf: pvf(1), + compilation_timeout, + }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); let w1 = test.workers.insert(()); test.send_from_pool(pool::FromPool::Spawned(w1)); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. 
}); // Enqueue a critical job, which warrants spawning over the soft limit. - test.send_queue(ToQueue::Enqueue { priority: Priority::Critical, pvf: pvf(2) }); + test.send_queue(ToQueue::Enqueue { + priority: Priority::Critical, + pvf: pvf(2), + compilation_timeout, + }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); // However, before the new worker had a chance to spawn, the first worker finishes with its @@ -667,9 +701,10 @@ mod tests { async fn worker_mass_die_out_doesnt_stall_queue() { let mut test = Test::new(2, 2); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(1) }); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(2) }); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(3) }); + let (priority, compilation_timeout) = (Priority::Normal, PRECHECK_COMPILATION_TIMEOUT); + test.send_queue(ToQueue::Enqueue { priority, pvf: pvf(1), compilation_timeout }); + test.send_queue(ToQueue::Enqueue { priority, pvf: pvf(2), compilation_timeout }); + test.send_queue(ToQueue::Enqueue { priority, pvf: pvf(3), compilation_timeout }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); @@ -696,7 +731,11 @@ mod tests { async fn doesnt_resurrect_ripped_worker_if_no_work() { let mut test = Test::new(2, 2); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(1) }); + test.send_queue(ToQueue::Enqueue { + priority: Priority::Normal, + pvf: pvf(1), + compilation_timeout: PRECHECK_COMPILATION_TIMEOUT, + }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); @@ -717,7 +756,11 @@ mod tests { async fn rip_for_start_work() { let mut test = Test::new(2, 2); - test.send_queue(ToQueue::Enqueue { priority: Priority::Normal, pvf: pvf(1) }); + test.send_queue(ToQueue::Enqueue { + priority: Priority::Normal, + pvf: pvf(1), + compilation_timeout: PRECHECK_COMPILATION_TIMEOUT, + }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); diff --git a/node/core/pvf/src/prepare/worker.rs b/node/core/pvf/src/prepare/worker.rs index a9124b3926c5..77570b47360b 100644 --- a/node/core/pvf/src/prepare/worker.rs +++ b/node/core/pvf/src/prepare/worker.rs @@ -32,10 +32,6 @@ use parity_scale_codec::{Decode, Encode}; use sp_core::hexdisplay::HexDisplay; use std::{panic, sync::Arc, time::Duration}; -/// The time period after which the preparation worker is considered unresponsive and will be killed. -// NOTE: If you change this make sure to fix the buckets of `pvf_preparation_time` metric. -const COMPILATION_TIMEOUT: Duration = Duration::from_secs(60); - /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. /// /// The program should be able to handle ` prepare-worker ` invocation. @@ -69,6 +65,7 @@ pub async fn start_work( code: Arc>, cache_path: &Path, artifact_path: PathBuf, + compilation_timeout: Duration, ) -> Outcome { let IdleWorker { mut stream, pid } = worker; @@ -103,7 +100,7 @@ pub async fn start_work( } let selected = - match async_std::future::timeout(COMPILATION_TIMEOUT, framed_recv(&mut stream)).await { + match async_std::future::timeout(compilation_timeout, framed_recv(&mut stream)).await { Ok(Ok(response_bytes)) => { // Received bytes from worker within the time limit. // By convention we expect encoded `PrepareResult`. 
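Editor's note on the hunks above: the single hard-coded `COMPILATION_TIMEOUT` in `prepare/worker.rs` is replaced by a per-job `compilation_timeout` chosen by the host (60 s for prechecking, 180 s for execute-path preparation) and threaded through `ToQueue::Enqueue`, `ToPool::StartWork` and `worker::start_work`, where it bounds the wait on the worker response. A minimal, self-contained sketch of that pattern follows; the names here are hypothetical stand-ins, not the crate's actual types.

```rust
use std::time::Duration;

const PRECHECK_COMPILATION_TIMEOUT: Duration = Duration::from_secs(60);
const EXECUTE_COMPILATION_TIMEOUT: Duration = Duration::from_secs(180);

/// Hypothetical stand-in for the queue message that now carries a per-job timeout.
enum ToQueue {
    Enqueue { pvf_code: Vec<u8>, compilation_timeout: Duration },
}

/// Pick the timeout based on why the PVF is being prepared: prechecking gets the
/// strict 60 s budget, preparation for execution gets the more lenient 180 s one.
fn enqueue(pvf_code: Vec<u8>, is_precheck: bool) -> ToQueue {
    let compilation_timeout = if is_precheck {
        PRECHECK_COMPILATION_TIMEOUT
    } else {
        EXECUTE_COMPILATION_TIMEOUT
    };
    ToQueue::Enqueue { pvf_code, compilation_timeout }
}

fn main() {
    match enqueue(vec![0u8; 4], true) {
        ToQueue::Enqueue { compilation_timeout, .. } => {
            // Precheck jobs carry the 60 s budget all the way to the worker wait.
            assert_eq!(compilation_timeout, Duration::from_secs(60));
        },
    }
}
```

Carrying the timeout in the message (rather than a global constant) is what lets the same pool and queue serve both precheck and execute preparation, which is also why the `pvf_preparation_time` metric gains the extra 180 s bucket.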
diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index b5e9823f5ed1..360cfe60b735 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -29,11 +29,12 @@ assert_matches = "1.5" async-trait = "0.1.57" sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -clap = { version = "3.1", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum/" } erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } +rand = "0.8.5" [features] default = [] diff --git a/node/malus/integrationtests/0001-dispute-valid-block.zndsl b/node/malus/integrationtests/0001-dispute-valid-block.zndsl index fe0c14c791e4..f778b0231ba9 100644 --- a/node/malus/integrationtests/0001-dispute-valid-block.zndsl +++ b/node/malus/integrationtests/0001-dispute-valid-block.zndsl @@ -6,7 +6,7 @@ Creds: config alice: is up bob: is up charlie: is up -david is up +dave: is up alice: reports node_roles is 4 bob: reports node_roles is 4 alice: reports sub_libp2p_is_major_syncing is 0 diff --git a/node/malus/src/malus.rs b/node/malus/src/malus.rs index dd9f1377f14a..bb466d4ba4de 100644 --- a/node/malus/src/malus.rs +++ b/node/malus/src/malus.rs @@ -18,7 +18,6 @@ use clap::Parser; use color_eyre::eyre; -use polkadot_cli::{Cli, RunCmd}; pub(crate) mod interceptor; pub(crate) mod shared; @@ -29,52 +28,66 @@ use variants::*; /// Define the different variants of behavior. #[derive(Debug, Parser)] -#[clap(about = "Malus - the nemesis of polkadot.", version)] -#[clap(rename_all = "kebab-case")] +#[command(about = "Malus - the nemesis of polkadot.", version, rename_all = "kebab-case")] enum NemesisVariant { /// Suggest a candidate with an invalid proof of validity. - SuggestGarbageCandidate(RunCmd), + SuggestGarbageCandidate(SuggestGarbageCandidateOptions), /// Back a candidate with a specifically crafted proof of validity. - BackGarbageCandidate(RunCmd), + BackGarbageCandidate(BackGarbageCandidateOptions), /// Delayed disputing of ancestors that are perfectly fine. DisputeAncestor(DisputeAncestorOptions), #[allow(missing_docs)] - #[clap(name = "prepare-worker", hide = true)] + #[command(name = "prepare-worker", hide = true)] PvfPrepareWorker(polkadot_cli::ValidationWorkerCommand), #[allow(missing_docs)] - #[clap(name = "execute-worker", hide = true)] + #[command(name = "execute-worker", hide = true)] PvfExecuteWorker(polkadot_cli::ValidationWorkerCommand), } #[derive(Debug, Parser)] #[allow(missing_docs)] struct MalusCli { - #[clap(subcommand)] + #[command(subcommand)] pub variant: NemesisVariant, /// Sets the minimum delay between the best and finalized block. pub finality_delay: Option, } -fn run_cmd(run: RunCmd) -> Cli { - Cli { subcommand: None, run } -} - impl MalusCli { /// Launch a malus node. 
fn launch(self) -> eyre::Result<()> { let finality_delay = self.finality_delay; match self.variant { - NemesisVariant::BackGarbageCandidate(cmd) => - polkadot_cli::run_node(run_cmd(cmd), BackGarbageCandidate, finality_delay)?, - NemesisVariant::SuggestGarbageCandidate(cmd) => - polkadot_cli::run_node(run_cmd(cmd), BackGarbageCandidateWrapper, finality_delay)?, - NemesisVariant::DisputeAncestor(opts) => polkadot_cli::run_node( - run_cmd(opts.clone().cmd), - DisputeValidCandidates::new(opts), - finality_delay, - )?, + NemesisVariant::BackGarbageCandidate(opts) => { + let BackGarbageCandidateOptions { percentage, cli } = opts; + + polkadot_cli::run_node(cli, BackGarbageCandidates { percentage }, finality_delay)? + }, + NemesisVariant::SuggestGarbageCandidate(opts) => { + let SuggestGarbageCandidateOptions { percentage, cli } = opts; + + polkadot_cli::run_node( + cli, + SuggestGarbageCandidates { percentage }, + finality_delay, + )? + }, + NemesisVariant::DisputeAncestor(opts) => { + let DisputeAncestorOptions { + fake_validation, + fake_validation_error, + percentage, + cli, + } = opts; + + polkadot_cli::run_node( + cli, + DisputeValidCandidates { fake_validation, fake_validation_error, percentage }, + finality_delay, + )? + }, NemesisVariant::PvfPrepareWorker(cmd) => { #[cfg(target_os = "android")] { @@ -126,7 +139,80 @@ mod tests { variant: NemesisVariant::DisputeAncestor(run), .. } => { - assert!(run.cmd.base.bob); + assert!(run.cli.run.base.bob); + }); + } + + #[test] + fn percentage_works_suggest_garbage() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "suggest-garbage-candidate", + "--percentage", + "100", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::SuggestGarbageCandidate(run), + .. + } => { + assert!(run.cli.run.base.bob); + }); + } + + #[test] + fn percentage_works_dispute_ancestor() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "dispute-ancestor", + "--percentage", + "100", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::DisputeAncestor(run), + .. + } => { + assert!(run.cli.run.base.bob); + }); + } + + #[test] + fn percentage_works_back_garbage() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "back-garbage-candidate", + "--percentage", + "100", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::BackGarbageCandidate(run), + .. + } => { + assert!(run.cli.run.base.bob); + }); + } + + #[test] + #[should_panic] + fn validate_range_for_percentage() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "suggest-garbage-candidate", + "--percentage", + "101", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::DisputeAncestor(run), + .. 
+ } => { + assert!(run.cli.run.base.bob); }); } } diff --git a/node/malus/src/variants/back_garbage_candidate.rs b/node/malus/src/variants/back_garbage_candidate.rs index cf72776b5f28..b17b8bca5887 100644 --- a/node/malus/src/variants/back_garbage_candidate.rs +++ b/node/malus/src/variants/back_garbage_candidate.rs @@ -25,6 +25,7 @@ use polkadot_cli::{ OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, ParachainHost, ProvideRuntimeApi, }, + Cli, }; use polkadot_node_subsystem::SpawnGlue; use sp_core::traits::SpawnNamed; @@ -36,11 +37,27 @@ use crate::{ use std::sync::Arc; +#[derive(Debug, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct BackGarbageCandidateOptions { + /// Determines the percentage of garbage candidates that should be backed. + /// Defaults to 100% of garbage candidates being backed. + #[clap(short, long, ignore_case = true, default_value_t = 100, value_parser = clap::value_parser!(u8).range(0..=100))] + pub percentage: u8, + + #[clap(flatten)] + pub cli: Cli, +} + /// Generates an overseer that replaces the candidate validation subsystem with our malicious /// variant. -pub(crate) struct BackGarbageCandidate; +pub(crate) struct BackGarbageCandidates { + /// The probability of behaving maliciously. + pub percentage: u8, +} -impl OverseerGen for BackGarbageCandidate { +impl OverseerGen for BackGarbageCandidates { fn generate<'a, Spawner, RuntimeClient>( &self, connector: OverseerConnector, @@ -55,6 +72,7 @@ impl OverseerGen for BackGarbageCandidate { let validation_filter = ReplaceValidationResult::new( FakeCandidateValidation::BackingAndApprovalValid, FakeCandidateValidationError::InvalidOutputs, + f64::from(self.percentage), SpawnGlue(spawner), ); diff --git a/node/malus/src/variants/common.rs b/node/malus/src/variants/common.rs index e112aa49f83e..610b43bc33a4 100644 --- a/node/malus/src/variants/common.rs +++ b/node/malus/src/variants/common.rs @@ -33,9 +33,10 @@ use polkadot_primitives::v2::{ }; use futures::channel::oneshot; +use rand::distributions::{Bernoulli, Distribution}; -#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq)] -#[clap(rename_all = "kebab-case")] +#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] +#[value(rename_all = "kebab-case")] #[non_exhaustive] pub enum FakeCandidateValidation { Disabled, @@ -48,8 +49,8 @@ pub enum FakeCandidateValidation { } /// Candidate invalidity details -#[derive(clap::ArgEnum, Clone, Copy, Debug, PartialEq)] -#[clap(rename_all = "kebab-case")] +#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] +#[value(rename_all = "kebab-case")] pub enum FakeCandidateValidationError { /// Validation outputs check doesn't pass. InvalidOutputs, @@ -109,6 +110,7 @@ impl Into for FakeCandidateValidationError { pub struct ReplaceValidationResult { fake_validation: FakeCandidateValidation, fake_validation_error: FakeCandidateValidationError, + distribution: Bernoulli, spawner: Spawner, } @@ -119,9 +121,12 @@ where pub fn new( fake_validation: FakeCandidateValidation, fake_validation_error: FakeCandidateValidationError, + percentage: f64, spawner: Spawner, ) -> Self { - Self { fake_validation, fake_validation_error, spawner } + let distribution = Bernoulli::new(percentage / 100.0) + .expect("Invalid probability! Percentage must be in range [0..=100]."); + Self { fake_validation, fake_validation_error, distribution, spawner } } /// Creates and sends the validation response for a given candidate. 
Queries the runtime to obtain the validation data for the @@ -202,13 +207,14 @@ where { type Message = CandidateValidationMessage; - // Capture all candidate validation requests and depending on configuration fail them. + // Capture all (approval and backing) candidate validation requests and depending on configuration fail them. fn intercept_incoming( &self, subsystem_sender: &mut Sender, msg: FromOrchestra, ) -> Option> { match msg { + // Message sent by the approval voting subsystem FromOrchestra::Communication { msg: CandidateValidationMessage::ValidateFromExhaustive( @@ -236,28 +242,84 @@ where ), }) } - create_validation_response( - validation_data, - candidate_receipt.descriptor, - sender, - ); - None + // Create the fake response with probability `p` if the `PoV` is malicious, + // where 'p' defaults to 100% for suggest-garbage-candidate variant. + let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); + match behave_maliciously { + true => { + gum::info!( + target: MALUS, + ?behave_maliciously, + "😈 Creating malicious ValidationResult::Valid message with fake candidate commitments.", + ); + + create_validation_response( + validation_data, + candidate_receipt.descriptor, + sender, + ); + None + }, + false => { + // Behave normally with probability `(1-p)` for a malicious `PoV`. + gum::info!( + target: MALUS, + ?behave_maliciously, + "😈 Passing CandidateValidationMessage::ValidateFromExhaustive to the candidate validation subsystem.", + ); + + Some(FromOrchestra::Communication { + msg: CandidateValidationMessage::ValidateFromExhaustive( + validation_data, + validation_code, + candidate_receipt, + pov, + timeout, + sender, + ), + }) + }, + } }, FakeCandidateValidation::ApprovalInvalid | FakeCandidateValidation::BackingAndApprovalInvalid => { - let validation_result = - ValidationResult::Invalid(InvalidCandidate::InvalidOutputs); + // Set the validation result to invalid with probability `p` and trigger a dispute + let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); + match behave_maliciously { + true => { + let validation_result = + ValidationResult::Invalid(InvalidCandidate::InvalidOutputs); + + gum::info!( + target: MALUS, + ?behave_maliciously, + para_id = ?candidate_receipt.descriptor.para_id, + "😈 Maliciously sending invalid validation result: {:?}.", + &validation_result, + ); + + // We're not even checking the candidate, this makes us appear faster than honest validators. + sender.send(Ok(validation_result)).unwrap(); + None + }, + false => { + // Behave normally with probability `(1-p)` + gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); - gum::debug!( - target: MALUS, - para_id = ?candidate_receipt.descriptor.para_id, - "ValidateFromExhaustive result: {:?}", - &validation_result - ); - // We're not even checking the candidate, this makes us appear faster than honest validators. 
- sender.send(Ok(validation_result)).unwrap(); - None + Some(FromOrchestra::Communication { + msg: CandidateValidationMessage::ValidateFromExhaustive( + validation_data, + validation_code, + candidate_receipt, + pov, + timeout, + sender, + ), + }) + }, + } }, + // Handle FakeCandidateValidation::Disabled _ => Some(FromOrchestra::Communication { msg: CandidateValidationMessage::ValidateFromExhaustive( validation_data, @@ -270,6 +332,7 @@ where }), } }, + // Behaviour related to the backing subsystem FromOrchestra::Communication { msg: CandidateValidationMessage::ValidateFromChainState( @@ -293,27 +356,68 @@ where ), }) } - self.send_validation_response( - candidate_receipt.descriptor, - subsystem_sender.clone(), - response_sender, - ); - None + // If the `PoV` is malicious, back the candidate with some probability `p`, + // where 'p' defaults to 100% for suggest-garbage-candidate variant. + let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); + match behave_maliciously { + true => { + gum::info!( + target: MALUS, + ?behave_maliciously, + "😈 Backing candidate with malicious PoV.", + ); + + self.send_validation_response( + candidate_receipt.descriptor, + subsystem_sender.clone(), + response_sender, + ); + None + }, + // If the `PoV` is malicious, we behave normally with some probability `(1-p)` + false => Some(FromOrchestra::Communication { + msg: CandidateValidationMessage::ValidateFromChainState( + candidate_receipt, + pov, + timeout, + response_sender, + ), + }), + } }, FakeCandidateValidation::BackingInvalid | FakeCandidateValidation::BackingAndApprovalInvalid => { - let validation_result = - ValidationResult::Invalid(self.fake_validation_error.clone().into()); - gum::debug!( - target: MALUS, - para_id = ?candidate_receipt.descriptor.para_id, - "ValidateFromChainState result: {:?}", - &validation_result - ); + // Maliciously set the validation result to invalid for a valid candidate with probability `p` + let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); + match behave_maliciously { + true => { + let validation_result = ValidationResult::Invalid( + self.fake_validation_error.clone().into(), + ); + gum::info!( + target: MALUS, + para_id = ?candidate_receipt.descriptor.para_id, + "😈 Maliciously sending invalid validation result: {:?}.", + &validation_result, + ); + // We're not even checking the candidate, this makes us appear faster than honest validators. + response_sender.send(Ok(validation_result)).unwrap(); + None + }, + // With some probability `(1-p)` we behave normally + false => { + gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); - // We're not even checking the candidate, this makes us appear faster than honest validators. 
- response_sender.send(Ok(validation_result)).unwrap(); - None + Some(FromOrchestra::Communication { + msg: CandidateValidationMessage::ValidateFromChainState( + candidate_receipt, + pov, + timeout, + response_sender, + ), + }) + }, + } }, _ => Some(FromOrchestra::Communication { msg: CandidateValidationMessage::ValidateFromChainState( diff --git a/node/malus/src/variants/dispute_valid_candidates.rs b/node/malus/src/variants/dispute_valid_candidates.rs index 17ac070e619b..cea058023d6f 100644 --- a/node/malus/src/variants/dispute_valid_candidates.rs +++ b/node/malus/src/variants/dispute_valid_candidates.rs @@ -29,7 +29,7 @@ use polkadot_cli::{ OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, ParachainHost, ProvideRuntimeApi, }, - RunCmd, + Cli, }; use polkadot_node_subsystem::SpawnGlue; use sp_core::traits::SpawnNamed; @@ -40,34 +40,37 @@ use crate::{interceptor::*, variants::ReplaceValidationResult}; use std::sync::Arc; -#[derive(Clone, Debug, clap::Parser)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, clap::Parser)] +#[command(rename_all = "kebab-case")] #[allow(missing_docs)] pub struct DisputeAncestorOptions { /// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is skipped /// during backing and/or approval and it's result can by specified by this option and `--fake-validation-error` /// for invalid candidate outcomes. - #[clap(long, arg_enum, ignore_case = true, default_value_t = FakeCandidateValidation::BackingAndApprovalInvalid)] + #[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidation::BackingAndApprovalInvalid)] pub fake_validation: FakeCandidateValidation, /// Applies only when `--fake-validation` is configured to reject candidates as invalid. It allows /// to specify the exact error to return from the malicious candidate validation subsystem. - #[clap(long, arg_enum, ignore_case = true, default_value_t = FakeCandidateValidationError::InvalidOutputs)] + #[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidationError::InvalidOutputs)] pub fake_validation_error: FakeCandidateValidationError, + /// Determines the percentage of candidates that should be disputed. Allows for fine-tuning + /// the intensity of the behavior of the malicious node. Value must be in the range [0..=100]. + #[clap(short, long, ignore_case = true, default_value_t = 100, value_parser = clap::value_parser!(u8).range(0..=100))] + pub percentage: u8, + #[clap(flatten)] - pub cmd: RunCmd, + pub cli: Cli, } pub(crate) struct DisputeValidCandidates { /// Fake validation config (applies to disputes as well). - opts: DisputeAncestorOptions, -} - -impl DisputeValidCandidates { - pub fn new(opts: DisputeAncestorOptions) -> Self { - Self { opts } - } + pub fake_validation: FakeCandidateValidation, + /// Fake validation error config. + pub fake_validation_error: FakeCandidateValidationError, + /// The probability of behaving maliciously. 
+ pub percentage: u8, } impl OverseerGen for DisputeValidCandidates { @@ -83,8 +86,9 @@ impl OverseerGen for DisputeValidCandidates { { let spawner = args.spawner.clone(); let validation_filter = ReplaceValidationResult::new( - self.opts.fake_validation, - self.opts.fake_validation_error, + self.fake_validation, + self.fake_validation_error, + f64::from(self.percentage), SpawnGlue(spawner.clone()), ); diff --git a/node/malus/src/variants/mod.rs b/node/malus/src/variants/mod.rs index d57580fdf8d3..6f9a9359e025 100644 --- a/node/malus/src/variants/mod.rs +++ b/node/malus/src/variants/mod.rs @@ -22,8 +22,8 @@ mod dispute_valid_candidates; mod suggest_garbage_candidate; pub(crate) use self::{ - back_garbage_candidate::BackGarbageCandidate, + back_garbage_candidate::{BackGarbageCandidateOptions, BackGarbageCandidates}, dispute_valid_candidates::{DisputeAncestorOptions, DisputeValidCandidates}, - suggest_garbage_candidate::BackGarbageCandidateWrapper, + suggest_garbage_candidate::{SuggestGarbageCandidateOptions, SuggestGarbageCandidates}, }; pub(crate) use common::*; diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs index b8aaaa18c10d..86b0c49e7125 100644 --- a/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/node/malus/src/variants/suggest_garbage_candidate.rs @@ -29,14 +29,17 @@ use polkadot_cli::{ OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, ParachainHost, ProvideRuntimeApi, }, + Cli, }; use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; -use polkadot_primitives::v2::{CandidateDescriptor, CandidateHash}; +use polkadot_primitives::v2::CandidateDescriptor; use polkadot_node_subsystem_util::request_validators; use sp_core::traits::SpawnNamed; +use rand::distributions::{Bernoulli, Distribution}; + // Filter wrapping related types. use crate::{ interceptor::*, @@ -49,28 +52,16 @@ use crate::{ // Import extra types relevant to the particular // subsystem. -use polkadot_node_subsystem::{ - messages::{CandidateBackingMessage, CollatorProtocolMessage}, - SpawnGlue, -}; +use polkadot_node_subsystem::{messages::CandidateBackingMessage, SpawnGlue}; use polkadot_primitives::v2::CandidateReceipt; -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; - -struct Inner { - /// Maps malicious candidate hash to original candidate hash. - /// It is used to replace outgoing collator protocol seconded messages. - map: HashMap, -} +use std::sync::Arc; /// Replace outgoing approval messages with disputes. #[derive(Clone)] struct NoteCandidate { - inner: Arc>, spawner: Spawner, + percentage: f64, } impl MessageInterceptor for NoteCandidate @@ -80,7 +71,7 @@ where { type Message = CandidateBackingMessage; - /// Intercept incoming `Second` requests from the `collator-protocol` subsystem. We take + /// Intercept incoming `Second` requests from the `collator-protocol` subsystem. 
fn intercept_incoming( &self, subsystem_sender: &mut Sender, @@ -88,163 +79,174 @@ where ) -> Option> { match msg { FromOrchestra::Communication { - msg: CandidateBackingMessage::Second(relay_parent, candidate, _pov), + msg: CandidateBackingMessage::Second(relay_parent, ref candidate, ref _pov), } => { gum::debug!( target: MALUS, candidate_hash = ?candidate.hash(), ?relay_parent, - "Received request to second candidate" - ); - - let pov = PoV { block_data: BlockData(MALICIOUS_POV.into()) }; - - let (sender, receiver) = std::sync::mpsc::channel(); - let mut new_sender = subsystem_sender.clone(); - let _candidate = candidate.clone(); - self.spawner.spawn_blocking( - "malus-get-validation-data", - Some("malus"), - Box::pin(async move { - gum::trace!(target: MALUS, "Requesting validators"); - let n_validators = request_validators(relay_parent, &mut new_sender) - .await - .await - .unwrap() - .unwrap() - .len(); - gum::trace!(target: MALUS, "Validators {}", n_validators); - match find_validation_data(&mut new_sender, &_candidate.descriptor()).await - { - Ok(Some((validation_data, validation_code))) => { - sender - .send((validation_data, validation_code, n_validators)) - .expect("channel is still open"); - }, - _ => { - panic!("Unable to fetch validation data"); - }, - } - }), - ); - - let (validation_data, validation_code, n_validators) = receiver.recv().unwrap(); - - let validation_data_hash = validation_data.hash(); - let validation_code_hash = validation_code.hash(); - let validation_data_relay_parent_number = validation_data.relay_parent_number; - - gum::trace!( - target: MALUS, - candidate_hash = ?candidate.hash(), - ?relay_parent, - ?n_validators, - ?validation_data_hash, - ?validation_code_hash, - ?validation_data_relay_parent_number, - "Fetched validation data." + "Received request to second candidate", ); - let malicious_available_data = - AvailableData { pov: Arc::new(pov.clone()), validation_data }; - - let pov_hash = pov.hash(); - let erasure_root = { - let chunks = - erasure::obtain_chunks_v1(n_validators as usize, &malicious_available_data) - .unwrap(); - - let branches = erasure::branches(chunks.as_ref()); - branches.root() - }; - - let (collator_id, collator_signature) = { - use polkadot_primitives::v2::CollatorPair; - use sp_core::crypto::Pair; - - let collator_pair = CollatorPair::generate().0; - let signature_payload = polkadot_primitives::v2::collator_signature_payload( - &relay_parent, - &candidate.descriptor().para_id, - &validation_data_hash, - &pov_hash, - &validation_code_hash, + // Need to draw value from Bernoulli distribution with given probability of success defined by the clap parameter. + // Note that clap parameter must be f64 since this is expected by the Bernoulli::new() function. + // It must be converted from u8, due to the lack of support for the .range() call on u64 in the clap crate. + let distribution = Bernoulli::new(self.percentage / 100.0) + .expect("Invalid probability! Percentage must be in range [0..=100]."); + + // Draw a random boolean from the Bernoulli distribution with probability of true equal to `p`. + // We use `rand::thread_rng` as the source of randomness. 
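Aside on the probability handling used throughout these malus variants: the CLI percentage is converted once into a `Bernoulli` distribution and each intercepted message draws a single sample to decide whether to misbehave. A standalone sketch of just that conversion, assuming the `rand = "0.8"` dependency added to `node/malus/Cargo.toml` earlier in this diff:

```rust
use rand::distributions::{Bernoulli, Distribution};

fn main() {
    // The CLI supplies a u8 percentage, validated to the range 0..=100.
    let percentage: u8 = 100;
    let distribution = Bernoulli::new(f64::from(percentage) / 100.0)
        .expect("0..=100 always maps to a valid probability");

    // One draw per intercepted message: `true` means "misbehave for this candidate".
    let behave_maliciously = distribution.sample(&mut rand::thread_rng());
    println!("acting maliciously: {behave_maliciously}");
}
```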
+ let generate_malicious_candidate = distribution.sample(&mut rand::thread_rng()); + + if generate_malicious_candidate == true { + gum::debug!(target: MALUS, "😈 Suggesting malicious candidate.",); + + let pov = PoV { block_data: BlockData(MALICIOUS_POV.into()) }; + + let (sender, receiver) = std::sync::mpsc::channel(); + let mut new_sender = subsystem_sender.clone(); + let _candidate = candidate.clone(); + self.spawner.spawn_blocking( + "malus-get-validation-data", + Some("malus"), + Box::pin(async move { + gum::trace!(target: MALUS, "Requesting validators"); + let n_validators = request_validators(relay_parent, &mut new_sender) + .await + .await + .unwrap() + .unwrap() + .len(); + gum::trace!(target: MALUS, "Validators {}", n_validators); + match find_validation_data(&mut new_sender, &_candidate.descriptor()) + .await + { + Ok(Some((validation_data, validation_code))) => { + sender + .send((validation_data, validation_code, n_validators)) + .expect("channel is still open"); + }, + _ => { + panic!("Unable to fetch validation data"); + }, + } + }), ); - (collator_pair.public(), collator_pair.sign(&signature_payload)) - }; - - let malicious_commitments = - create_fake_candidate_commitments(&malicious_available_data.validation_data); - - let malicious_candidate = CandidateReceipt { - descriptor: CandidateDescriptor { - para_id: candidate.descriptor().para_id, - relay_parent, - collator: collator_id, - persisted_validation_data_hash: validation_data_hash, - pov_hash, - erasure_root, - signature: collator_signature, - para_head: malicious_commitments.head_data.hash(), - validation_code_hash, - }, - commitments_hash: malicious_commitments.hash(), - }; - let malicious_candidate_hash = malicious_candidate.hash(); - - gum::debug!( - target: MALUS, - candidate_hash = ?candidate.hash(), - ?malicious_candidate_hash, - "Created malicious candidate" - ); - - // Map malicious candidate to the original one. We need this mapping to send back the correct seconded statement - // to the collators. - self.inner - .lock() - .expect("bad lock") - .map - .insert(malicious_candidate_hash, candidate.hash()); + let (validation_data, validation_code, n_validators) = receiver.recv().unwrap(); + + let validation_data_hash = validation_data.hash(); + let validation_code_hash = validation_code.hash(); + let validation_data_relay_parent_number = validation_data.relay_parent_number; + + gum::trace!( + target: MALUS, + candidate_hash = ?candidate.hash(), + ?relay_parent, + ?n_validators, + ?validation_data_hash, + ?validation_code_hash, + ?validation_data_relay_parent_number, + "Fetched validation data." 
+ ); - let message = FromOrchestra::Communication { - msg: CandidateBackingMessage::Second(relay_parent, malicious_candidate, pov), - }; + let malicious_available_data = + AvailableData { pov: Arc::new(pov.clone()), validation_data }; + + let pov_hash = pov.hash(); + let erasure_root = { + let chunks = erasure::obtain_chunks_v1( + n_validators as usize, + &malicious_available_data, + ) + .unwrap(); + + let branches = erasure::branches(chunks.as_ref()); + branches.root() + }; + + let (collator_id, collator_signature) = { + use polkadot_primitives::v2::CollatorPair; + use sp_core::crypto::Pair; + + let collator_pair = CollatorPair::generate().0; + let signature_payload = polkadot_primitives::v2::collator_signature_payload( + &relay_parent, + &candidate.descriptor().para_id, + &validation_data_hash, + &pov_hash, + &validation_code_hash, + ); + + (collator_pair.public(), collator_pair.sign(&signature_payload)) + }; + + let malicious_commitments = create_fake_candidate_commitments( + &malicious_available_data.validation_data, + ); - Some(message) + let malicious_candidate = CandidateReceipt { + descriptor: CandidateDescriptor { + para_id: candidate.descriptor().para_id, + relay_parent, + collator: collator_id, + persisted_validation_data_hash: validation_data_hash, + pov_hash, + erasure_root, + signature: collator_signature, + para_head: malicious_commitments.head_data.hash(), + validation_code_hash, + }, + commitments_hash: malicious_commitments.hash(), + }; + let malicious_candidate_hash = malicious_candidate.hash(); + + let message = FromOrchestra::Communication { + msg: CandidateBackingMessage::Second( + relay_parent, + malicious_candidate, + pov, + ), + }; + + gum::info!( + target: MALUS, + candidate_hash = ?candidate.hash(), + "😈 Intercepted CandidateBackingMessage::Second and created malicious candidate with hash: {:?}", + &malicious_candidate_hash + ); + Some(message) + } else { + Some(msg) + } }, FromOrchestra::Communication { msg } => Some(FromOrchestra::Communication { msg }), FromOrchestra::Signal(signal) => Some(FromOrchestra::Signal(signal)), } } +} - fn intercept_outgoing( - &self, - msg: overseer::CandidateBackingOutgoingMessages, - ) -> Option { - let msg = match msg { - overseer::CandidateBackingOutgoingMessages::CollatorProtocolMessage( - CollatorProtocolMessage::Seconded(relay_parent, statement), - ) => { - // `parachain::collator-protocol: received an unexpected `CollationSeconded`: unknown statement statement=...` - // TODO: Fix this error. We get this on colaltors because `malicious backing` creates a candidate that gets backed/included. - // It is harmless for test parachain collators, but it will prevent cumulus based collators to make progress - // as they wait for the relay chain to confirm the seconding of the collation. - overseer::CandidateBackingOutgoingMessages::CollatorProtocolMessage( - CollatorProtocolMessage::Seconded(relay_parent, statement), - ) - }, - msg => msg, - }; - Some(msg) - } +#[derive(Debug, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct SuggestGarbageCandidateOptions { + /// Determines the percentage of malicious candidates that are suggested by malus, + /// based on the total number of intercepted CandidateBacking + /// Must be in the range [0..=100]. 
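The `--percentage` flag is declared the same way in every variant: a `u8` defaulting to 100 with a range-validated `value_parser`, so out-of-range values (as in the `validate_range_for_percentage` test added earlier in this diff) are rejected at parse time. A hypothetical minimal reproduction of that clap 4 derive pattern, with placeholder names rather than the crate's actual options structs:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
#[command(rename_all = "kebab-case")]
struct PercentageOpts {
    /// Chance, in percent, of acting maliciously for a given candidate.
    #[arg(short, long, default_value_t = 100, value_parser = clap::value_parser!(u8).range(0..=100))]
    percentage: u8,
}

fn main() {
    // `--percentage 101` would fail here with a range error from the value parser.
    let opts = PercentageOpts::parse_from(["demo", "--percentage", "75"]);
    println!("percentage = {}", opts.percentage);
}
```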
+ #[clap(short, long, ignore_case = true, default_value_t = 100, value_parser = clap::value_parser!(u8).range(0..=100))] + pub percentage: u8, + + #[clap(flatten)] + pub cli: Cli, } /// Garbage candidate implementation wrapper which implements `OverseerGen` glue. -pub(crate) struct BackGarbageCandidateWrapper; +pub(crate) struct SuggestGarbageCandidates { + /// The probability of behaving maliciously. + pub percentage: u8, +} -impl OverseerGen for BackGarbageCandidateWrapper { +impl OverseerGen for SuggestGarbageCandidates { fn generate<'a, Spawner, RuntimeClient>( &self, connector: OverseerConnector, @@ -255,14 +257,21 @@ impl OverseerGen for BackGarbageCandidateWrapper { RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, { - let inner = Inner { map: std::collections::HashMap::new() }; - let inner_mut = Arc::new(Mutex::new(inner)); - let note_candidate = - NoteCandidate { inner: inner_mut.clone(), spawner: SpawnGlue(args.spawner.clone()) }; + gum::info!( + target: MALUS, + "😈 Started Malus node with a {:?} percent chance of behaving maliciously for a given candidate.", + &self.percentage, + ); + let note_candidate = NoteCandidate { + spawner: SpawnGlue(args.spawner.clone()), + percentage: f64::from(self.percentage), + }; + let fake_valid_probability = 100.0; let validation_filter = ReplaceValidationResult::new( FakeCandidateValidation::BackingAndApprovalValid, FakeCandidateValidationError::InvalidOutputs, + fake_valid_probability, SpawnGlue(args.spawner.clone()), ); diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index f0cb4fc24ff8..5afae66ae818 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -343,9 +343,13 @@ impl State { }) }, NetworkBridgeEvent::NewGossipTopology(topology) => { - let session = topology.session; - self.handle_new_session_topology(ctx, session, SessionGridTopology::from(topology)) - .await; + self.handle_new_session_topology( + ctx, + topology.session, + topology.topology, + topology.local_index, + ) + .await; }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => { self.handle_peer_view_change(ctx, metrics, peer_id, view, rng).await; @@ -500,8 +504,14 @@ impl State { ctx: &mut Context, session: SessionIndex, topology: SessionGridTopology, + local_index: Option, ) { - self.topologies.insert_topology(session, topology); + if local_index.is_none() { + // this subsystem only matters to validators. 
+ return + } + + self.topologies.insert_topology(session, topology, local_index); let topology = self.topologies.get_topology(session).expect("just inserted above; qed"); adjust_required_routing_and_propagate( @@ -511,7 +521,9 @@ impl State { |block_entry| block_entry.session == session, |required_routing, local, validator_index| { if *required_routing == RequiredRouting::PendingTopology { - *required_routing = topology.required_routing_by_index(*validator_index, local); + *required_routing = topology + .local_grid_neighbors() + .required_routing_by_index(*validator_index, local); } }, ) @@ -861,7 +873,7 @@ impl State { let local = source == MessageSource::Local; let required_routing = topology.map_or(RequiredRouting::PendingTopology, |t| { - t.required_routing_by_index(validator_index, local) + t.local_grid_neighbors().required_routing_by_index(validator_index, local) }); let message_state = match entry.candidates.get_mut(claimed_candidate_index as usize) { @@ -902,7 +914,10 @@ impl State { return false } - if let Some(true) = topology.as_ref().map(|t| t.route_to_peer(required_routing, peer)) { + if let Some(true) = topology + .as_ref() + .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, peer)) + { return true } @@ -1169,7 +1184,8 @@ impl State { // the assignment to all aware peers in the required routing _except_ the original // source of the assignment. Hence the `in_topology_check`. // 3. Any randomly selected peers have been sent the assignment already. - let in_topology = topology.map_or(false, |t| t.route_to_peer(required_routing, peer)); + let in_topology = topology + .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; @@ -1301,9 +1317,9 @@ impl State { let required_routing = message_state.required_routing; let rng = &mut *rng; let mut peer_filter = move |peer_id| { - let in_topology = topology - .as_ref() - .map_or(false, |t| t.route_to_peer(required_routing, peer_id)); + let in_topology = topology.as_ref().map_or(false, |t| { + t.local_grid_neighbors().route_to_peer(required_routing, peer_id) + }); in_topology || { let route_random = random_routing.sample(total_peers, rng); if route_random { @@ -1564,7 +1580,10 @@ async fn adjust_required_routing_and_propagate network_bridge_event::NewGossipTopology { - let mut t = network_bridge_event::NewGossipTopology { - session, - our_neighbors_x: HashMap::new(), - our_neighbors_y: HashMap::new(), + // This builds a grid topology which is a square matrix. + // The local validator occupies the top left-hand corner. + // The X peers occupy the same row and the Y peers occupy + // the same column. + + let local_index = 1; + + assert_eq!( + neighbors_x.len(), + neighbors_y.len(), + "mocking grid topology only implemented for squares", + ); + + let d = neighbors_x.len() + 1; + + let grid_size = d * d; + assert!(grid_size > 0); + assert!(all_peers.len() >= grid_size); + + let peer_info = |i: usize| TopologyPeerInfo { + peer_ids: vec![all_peers[i].0.clone()], + validator_index: ValidatorIndex::from(i as u32), + discovery_id: all_peers[i].1.clone(), }; - for &i in neighbors_x { - t.our_neighbors_x.insert( - all_peers[i].1.clone(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![all_peers[i].0.clone()], - validator_index: ValidatorIndex::from(i as u32), - }, - ); + let mut canonical_shuffling: Vec<_> = (0..) 
+ .filter(|i| local_index != *i) + .filter(|i| !neighbors_x.contains(i)) + .filter(|i| !neighbors_y.contains(i)) + .take(grid_size) + .map(peer_info) + .collect(); + + // filled with junk except for own. + let mut shuffled_indices = vec![d + 1; grid_size]; + shuffled_indices[local_index] = 0; + canonical_shuffling[0] = peer_info(local_index); + + for (x_pos, v) in neighbors_x.iter().enumerate() { + let pos = 1 + x_pos; + canonical_shuffling[pos] = peer_info(*v); } - for &i in neighbors_y { - t.our_neighbors_y.insert( - all_peers[i].1.clone(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![all_peers[i].0.clone()], - validator_index: ValidatorIndex::from(i as u32), - }, - ); + for (y_pos, v) in neighbors_y.iter().enumerate() { + let pos = d * (1 + y_pos); + canonical_shuffling[pos] = peer_info(*v); + } + + let topology = SessionGridTopology::new(shuffled_indices, canonical_shuffling); + + // sanity check. + { + let g_n = topology + .compute_grid_neighbors_for(ValidatorIndex(local_index as _)) + .expect("topology just constructed with this validator index"); + + assert_eq!(g_n.validator_indices_x.len(), neighbors_x.len()); + assert_eq!(g_n.validator_indices_y.len(), neighbors_y.len()); + + for i in neighbors_x { + assert!(g_n.validator_indices_x.contains(&ValidatorIndex(*i as _))); + } + + for i in neighbors_y { + assert!(g_n.validator_indices_y.contains(&ValidatorIndex(*i as _))); + } } - t + network_bridge_event::NewGossipTopology { + session, + topology, + local_index: Some(ValidatorIndex(local_index as _)), + } } async fn setup_gossip_topology( diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index f2f92cc54490..a07f4e0baa52 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -362,7 +362,7 @@ impl RequestChunksFromValidators { sender .send_message(NetworkBridgeTxMessage::SendRequests( requests, - IfDisconnected::ImmediateError, + IfDisconnected::TryConnect, )) .await; } diff --git a/node/network/availability-recovery/src/tests.rs b/node/network/availability-recovery/src/tests.rs index 8a5191e70b26..3e898088c7f9 100644 --- a/node/network/availability-recovery/src/tests.rs +++ b/node/network/availability-recovery/src/tests.rs @@ -311,7 +311,7 @@ impl TestState { AllMessages::NetworkBridgeTx( NetworkBridgeTxMessage::SendRequests( requests, - IfDisconnected::ImmediateError, + _if_disconnected, ) ) => { for req in requests { diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index f826379c54e3..8ac7c2ac6bfb 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -18,6 +18,7 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } maplit = "1.0.2" diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index a0f82dc5ed1d..1b2167484b49 100644 --- 
a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -27,7 +27,7 @@ use futures::{channel::oneshot, FutureExt}; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{ - RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology, + GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; @@ -327,7 +327,7 @@ async fn handle_bitfield_distribution( }; let msg = BitfieldGossipMessage { relay_parent, signed_availability }; - let topology = state.topologies.get_topology_or_fallback(session_idx); + let topology = state.topologies.get_topology_or_fallback(session_idx).local_grid_neighbors(); let required_routing = topology.required_routing_by_index(validator_index, true); relay_message( @@ -352,7 +352,7 @@ async fn handle_bitfield_distribution( async fn relay_message( ctx: &mut Context, job_data: &mut PerRelayParentData, - topology: &SessionGridTopology, + topology_neighbors: &GridNeighbors, peer_views: &mut HashMap, validator: ValidatorId, message: BitfieldGossipMessage, @@ -384,7 +384,7 @@ async fn relay_message( let message_needed = job_data.message_from_validator_needed_by_peer(&peer, &validator); if message_needed { - let in_topology = topology.route_to_peer(required_routing, &peer); + let in_topology = topology_neighbors.route_to_peer(required_routing, &peer); let need_routing = in_topology || { let route_random = random_routing.sample(total_peers, rng); if route_random { @@ -533,7 +533,8 @@ async fn process_incoming_peer_message( let topology = state .topologies - .get_topology_or_fallback(job_data.signing_context.session_index); + .get_topology_or_fallback(job_data.signing_context.session_index) + .local_grid_neighbors(); let required_routing = topology.required_routing_by_index(validator_index, false); metrics.on_bitfield_received(); @@ -579,14 +580,24 @@ async fn handle_network_msg( }, NetworkBridgeEvent::NewGossipTopology(gossip_topology) => { let session_index = gossip_topology.session; - let new_topology = SessionGridTopology::from(gossip_topology); - let newly_added = new_topology.peers_diff(&new_topology); - state.topologies.update_topology(session_index, new_topology); + let new_topology = gossip_topology.topology; + let prev_neighbors = + state.topologies.get_current_topology().local_grid_neighbors().clone(); + + state.topologies.update_topology( + session_index, + new_topology, + gossip_topology.local_index, + ); + let current_topology = state.topologies.get_current_topology(); + + let newly_added = current_topology.local_grid_neighbors().peers_diff(&prev_neighbors); + gum::debug!( target: LOG_TARGET, ?session_index, - "New gossip topology received {} unseen peers", - newly_added.len() + newly_added_peers = ?newly_added.len(), + "New gossip topology received", ); for new_peer in newly_added { @@ -651,7 +662,7 @@ async fn handle_peer_view_change( .cloned() .collect::>(); - let topology = state.topologies.get_current_topology(); + let topology = state.topologies.get_current_topology().local_grid_neighbors(); let is_gossip_peer = topology.route_to_peer(RequiredRouting::GridXY, &origin); let lucky = is_gossip_peer || util::gen_ratio_rng( diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index f3894d61c5f9..5eb610fe8508 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ 
b/node/network/bitfield-distribution/src/tests.rs @@ -20,8 +20,10 @@ use bitvec::bitvec; use futures::executor; use maplit::hashmap; use polkadot_node_network_protocol::{ - grid_topology::SessionBoundGridTopologyStorage, our_view, peer_set::ValidationVersion, view, - ObservedRole, + grid_topology::{SessionBoundGridTopologyStorage, SessionGridTopology, TopologyPeerInfo}, + our_view, + peer_set::ValidationVersion, + view, ObservedRole, }; use polkadot_node_subsystem::{ jaeger, @@ -32,6 +34,7 @@ use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{AvailabilityBitfield, Signed, ValidatorIndex}; use rand_chacha::ChaCha12Rng; use sp_application_crypto::AppKey; +use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; use sp_core::Pair as PairT; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; @@ -61,10 +64,11 @@ fn prewarmed_state( peers: Vec, ) -> ProtocolState { let relay_parent = known_message.relay_parent.clone(); - let mut topology: SessionGridTopology = Default::default(); - topology.peers_x = peers.iter().cloned().collect(); let mut topologies = SessionBoundGridTopologyStorage::default(); - topologies.update_topology(0_u32, topology); + topologies.update_topology(0_u32, SessionGridTopology::new(Vec::new(), Vec::new()), None); + topologies.get_current_topology_mut().local_grid_neighbors_mut().peers_x = + peers.iter().cloned().collect(); + ProtocolState { per_relay_parent: hashmap! { relay_parent.clone() => @@ -456,10 +460,9 @@ fn do_not_relay_message_twice() { let mut rng = dummy_rng(); executor::block_on(async move { - let gossip_peers = SessionGridTopology { - peers_x: HashSet::from_iter(vec![peer_a.clone(), peer_b.clone()].into_iter()), - ..Default::default() - }; + let mut gossip_peers = GridNeighbors::empty(); + gossip_peers.peers_x = HashSet::from_iter(vec![peer_a.clone(), peer_b.clone()].into_iter()); + relay_message( &mut ctx, state.per_relay_parent.get_mut(&hash).unwrap(), @@ -780,33 +783,43 @@ fn topology_test() { .try_init(); let hash: Hash = [0; 32].into(); - let peers_x = (0..25).map(|_| PeerId::random()).collect::>(); - let peers_y = (0..25).map(|_| PeerId::random()).collect::>(); - - // ensure all unique - assert_eq!( - peers_x.iter().chain(peers_y.iter()).collect::>().len(), - peers_x.len() + peers_y.len() - ); // validator 0 key pair let (mut state, signing_context, keystore, validator) = state_with_view(our_view![hash], hash); - // Create a simple grid - let mut topology: SessionGridTopology = Default::default(); - topology.peers_x = peers_x.iter().cloned().collect::>(); - topology.validator_indices_x = peers_x + // Create a simple grid without any shuffling. We occupy position 1. 
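The hard-coded index sets used in the rewritten `topology_test` below follow from the square grid described earlier in this diff: with 49 validators in a 7x7 grid and the local validator at index 1, the X neighbours share its row and the Y neighbours share its column. An illustrative sketch of that neighbour rule (not the crate's `SessionGridTopology` implementation):

```rust
/// Row (X) and column (Y) neighbours of `index` in the smallest square grid
/// that fits `len` entries, excluding `index` itself.
fn grid_neighbors(index: usize, len: usize) -> (Vec<usize>, Vec<usize>) {
    let d = (len as f64).sqrt().ceil() as usize;
    let (row, col) = (index / d, index % d);

    let xs: Vec<usize> = (0..len).filter(|&i| i != index && i / d == row).collect();
    let ys: Vec<usize> = (0..len).filter(|&i| i != index && i % d == col).collect();
    (xs, ys)
}

fn main() {
    // 49 validators, local index 1: row neighbours 0, 2..=6 and column
    // neighbours 8, 15, 22, 29, 36, 43 - the peers_x / peers_y sets below.
    let (xs, ys) = grid_neighbors(1, 49);
    assert_eq!(xs, vec![0, 2, 3, 4, 5, 6]);
    assert_eq!(ys, vec![8, 15, 22, 29, 36, 43]);
}
```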
+ let topology_peer_info: Vec<_> = (0..49) + .map(|i| TopologyPeerInfo { + peer_ids: vec![PeerId::random()], + validator_index: ValidatorIndex(i as _), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }) + .collect(); + + let topology = SessionGridTopology::new((0usize..49).collect(), topology_peer_info.clone()); + state.topologies.update_topology(0_u32, topology, Some(ValidatorIndex(1))); + + let peers_x: Vec<_> = [0, 2, 3, 4, 5, 6] .iter() - .enumerate() - .map(|(idx, _)| ValidatorIndex(idx as u32)) - .collect::>(); - topology.peers_y = peers_y.iter().cloned().collect::>(); - topology.validator_indices_y = peers_y + .cloned() + .map(|i| topology_peer_info[i].peer_ids[0].clone()) + .collect(); + + let peers_y: Vec<_> = [8, 15, 22, 29, 36, 43] .iter() - .enumerate() - .map(|(idx, _)| ValidatorIndex((idx + peers_x.len()) as u32)) - .collect::>(); - state.topologies.update_topology(0_u32, topology); + .cloned() + .map(|i| topology_peer_info[i].peer_ids[0].clone()) + .collect(); + + { + let t = state.topologies.get_current_topology().local_grid_neighbors(); + for p_x in &peers_x { + assert!(t.peers_x.contains(p_x)); + } + for p_y in &peers_y { + assert!(t.peers_y.contains(p_y)); + } + } // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); @@ -860,7 +873,7 @@ fn topology_test() { AllMessages::NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), ) => { - let topology = state.topologies.get_current_topology(); + let topology = state.topologies.get_current_topology().local_grid_neighbors(); // It should send message to all peers in y direction and to 4 random peers in x direction assert_eq!(peers_y.len() + 4, peers.len()); assert!(topology.peers_y.iter().all(|peer| peers.contains(&peer))); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index b93024b43dfb..a08596cd15ac 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -27,6 +27,7 @@ use sp_consensus::SyncOracle; use polkadot_node_network_protocol::{ self as net_protocol, + grid_topology::{SessionGridTopology, TopologyPeerInfo}, peer_set::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, @@ -37,10 +38,9 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem::{ errors::SubsystemError, messages::{ - network_bridge_event::{NewGossipTopology, TopologyPeerInfo}, - ApprovalDistributionMessage, BitfieldDistributionMessage, CollatorProtocolMessage, - GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeRxMessage, - StatementDistributionMessage, + network_bridge_event::NewGossipTopology, ApprovalDistributionMessage, + BitfieldDistributionMessage, CollatorProtocolMessage, GossipSupportMessage, + NetworkBridgeEvent, NetworkBridgeRxMessage, StatementDistributionMessage, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, }; @@ -129,28 +129,6 @@ where } } -async fn update_gossip_peers_1d( - ads: &mut AD, - neighbors: N, -) -> HashMap -where - AD: validator_discovery::AuthorityDiscovery, - N: IntoIterator, - N::IntoIter: std::iter::ExactSizeIterator, -{ - let neighbors = neighbors.into_iter(); - let mut peers = HashMap::with_capacity(neighbors.len()); - for (authority, validator_index) in neighbors { - let addr = get_peer_id_by_authority_id(ads, authority.clone()).await; - - if let Some(peer_id) = addr { - peers.insert(authority, TopologyPeerInfo { peer_ids: 
vec![peer_id], validator_index }); - } - } - - peers -} - async fn handle_network_messages( mut sender: impl overseer::NetworkBridgeRxSenderTrait, mut network_service: impl Network, @@ -507,6 +485,26 @@ where } } +async fn flesh_out_topology_peers(ads: &mut AD, neighbors: N) -> Vec +where + AD: validator_discovery::AuthorityDiscovery, + N: IntoIterator, + N::IntoIter: std::iter::ExactSizeIterator, +{ + let neighbors = neighbors.into_iter(); + let mut peers = Vec::with_capacity(neighbors.len()); + for (discovery_id, validator_index) in neighbors { + let addr = get_peer_id_by_authority_id(ads, discovery_id.clone()).await; + peers.push(TopologyPeerInfo { + peer_ids: addr.into_iter().collect(), + validator_index, + discovery_id, + }); + } + + peers +} + #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)] async fn run_incoming_orchestra_signals( mut ctx: Context, @@ -532,29 +530,28 @@ where msg: NetworkBridgeRxMessage::NewGossipTopology { session, - our_neighbors_x, - our_neighbors_y, + local_index, + canonical_shuffling, + shuffled_indices, }, } => { gum::debug!( target: LOG_TARGET, action = "NewGossipTopology", - neighbors_x = our_neighbors_x.len(), - neighbors_y = our_neighbors_y.len(), + ?session, + ?local_index, "Gossip topology has changed", ); - let gossip_peers_x = - update_gossip_peers_1d(&mut authority_discovery_service, our_neighbors_x).await; - - let gossip_peers_y = - update_gossip_peers_1d(&mut authority_discovery_service, our_neighbors_y).await; + let topology_peers = + flesh_out_topology_peers(&mut authority_discovery_service, canonical_shuffling) + .await; dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::NewGossipTopology(NewGossipTopology { session, - our_neighbors_x: gossip_peers_x, - our_neighbors_y: gossip_peers_y, + topology: SessionGridTopology::new(shuffled_indices, topology_peers), + local_index, }), ctx.sender(), ); diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 823835aa7638..36459f9c8dab 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -525,73 +525,37 @@ async fn update_gossip_topology( sp_core::blake2_256(&subject) }; - // shuffle the indices - let mut rng: ChaCha20Rng = SeedableRng::from_seed(random_seed); - let len = authorities.len(); - let mut indices: Vec = (0..len).collect(); - indices.shuffle(&mut rng); - let our_shuffled_position = indices - .iter() - .position(|i| *i == our_index) - .expect("our_index < len; indices contains it; qed"); - - let neighbors = matrix_neighbors(our_shuffled_position, len); - let row_neighbors = neighbors - .row_neighbors - .map(|i| indices[i]) - .map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32))) - .collect(); - - let column_neighbors = neighbors - .column_neighbors - .map(|i| indices[i]) - .map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32))) - .collect(); + // shuffle the validators and create the index mapping + let (shuffled_indices, canonical_shuffling) = { + let mut rng: ChaCha20Rng = SeedableRng::from_seed(random_seed); + let len = authorities.len(); + let mut shuffled_indices = vec![0; len]; + let mut canonical_shuffling: Vec<_> = authorities + .iter() + .enumerate() + .map(|(i, a)| (a.clone(), ValidatorIndex(i as _))) + .collect(); + + canonical_shuffling.shuffle(&mut rng); + for (i, (_, validator_index)) in canonical_shuffling.iter().enumerate() { + shuffled_indices[validator_index.0 as usize] = i; + } + + (shuffled_indices, canonical_shuffling) + }; sender 
.send_message(NetworkBridgeRxMessage::NewGossipTopology { session: session_index, - our_neighbors_x: row_neighbors, - our_neighbors_y: column_neighbors, + local_index: Some(ValidatorIndex(our_index as _)), + canonical_shuffling, + shuffled_indices, }) .await; Ok(()) } -struct MatrixNeighbors { - row_neighbors: R, - column_neighbors: C, -} - -/// Compute our row and column neighbors in a matrix -fn matrix_neighbors( - our_index: usize, - len: usize, -) -> MatrixNeighbors, impl Iterator> { - assert!(our_index < len, "our_index is computed using `enumerate`; qed"); - - // e.g. for size 11 the matrix would be - // - // 0 1 2 - // 3 4 5 - // 6 7 8 - // 9 10 - // - // and for index 10, the neighbors would be 1, 4, 7, 9 - - let sqrt = (len as f64).sqrt() as usize; - let our_row = our_index / sqrt; - let our_column = our_index % sqrt; - let row_neighbors = our_row * sqrt..std::cmp::min(our_row * sqrt + sqrt, len); - let column_neighbors = (our_column..len).step_by(sqrt); - - MatrixNeighbors { - row_neighbors: row_neighbors.filter(move |i| *i != our_index), - column_neighbors: column_neighbors.filter(move |i| *i != our_index), - } -} - #[overseer::subsystem(GossipSupport, error = SubsystemError, prefix = self::overseer)] impl GossipSupport where diff --git a/node/network/gossip-support/src/tests.rs b/node/network/gossip-support/src/tests.rs index cde47e2ba977..79f2a9a6db42 100644 --- a/node/network/gossip-support/src/tests.rs +++ b/node/network/gossip-support/src/tests.rs @@ -29,6 +29,7 @@ use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch use sp_core::crypto::Pair as PairT; use sp_keyring::Sr25519Keyring; +use polkadot_node_network_protocol::grid_topology::{SessionGridTopology, TopologyPeerInfo}; use polkadot_node_subsystem::{ jaeger, messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, @@ -73,13 +74,15 @@ lazy_static! 
{ // [1 3] // [0 ] - static ref ROW_NEIGHBORS: Vec<(AuthorityDiscoveryId, ValidatorIndex)> = vec![ - (Sr25519Keyring::Charlie.public().into(), ValidatorIndex::from(2)), + static ref EXPECTED_SHUFFLING: Vec = vec![6, 4, 0, 5, 2, 3, 1]; + + static ref ROW_NEIGHBORS: Vec = vec![ + ValidatorIndex::from(2), ]; - static ref COLUMN_NEIGHBORS: Vec<(AuthorityDiscoveryId, ValidatorIndex)> = vec![ - (Sr25519Keyring::Two.public().into(), ValidatorIndex::from(5)), - (Sr25519Keyring::Eve.public().into(), ValidatorIndex::from(3)), + static ref COLUMN_NEIGHBORS: Vec = vec![ + ValidatorIndex::from(3), + ValidatorIndex::from(5), ]; } @@ -257,12 +260,31 @@ async fn test_neighbors(overseer: &mut VirtualOverseer, expected_session: Sessio overseer_recv(overseer).await, AllMessages::NetworkBridgeRx(NetworkBridgeRxMessage::NewGossipTopology { session: got_session, - our_neighbors_x, - our_neighbors_y, + local_index, + canonical_shuffling, + shuffled_indices, }) => { assert_eq!(expected_session, got_session); - let mut got_row: Vec<_> = our_neighbors_x.into_iter().collect(); - let mut got_column: Vec<_> = our_neighbors_y.into_iter().collect(); + assert_eq!(local_index, Some(ValidatorIndex(6))); + assert_eq!(shuffled_indices, EXPECTED_SHUFFLING.clone()); + + let grid_topology = SessionGridTopology::new( + shuffled_indices, + canonical_shuffling.into_iter() + .map(|(a, v)| TopologyPeerInfo { + validator_index: v, + discovery_id: a, + peer_ids: Vec::new(), + }) + .collect(), + ); + + let grid_neighbors = grid_topology + .compute_grid_neighbors_for(local_index.unwrap()) + .unwrap(); + + let mut got_row: Vec<_> = grid_neighbors.validator_indices_x.into_iter().collect(); + let mut got_column: Vec<_> = grid_neighbors.validator_indices_y.into_iter().collect(); got_row.sort(); got_column.sort(); assert_eq!(got_row, ROW_NEIGHBORS.clone()); @@ -694,26 +716,3 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() { assert_eq!(state.last_session_index, Some(1)); assert!(state.last_failure.is_none()); } - -#[test] -fn test_matrix_neighbors() { - for (our_index, len, expected_row, expected_column) in vec![ - (0usize, 1usize, vec![], vec![]), - (1, 2, vec![], vec![0usize]), - (0, 9, vec![1, 2], vec![3, 6]), - (9, 10, vec![], vec![0, 3, 6]), - (10, 11, vec![9], vec![1, 4, 7]), - (7, 11, vec![6, 8], vec![1, 4, 10]), - ] - .into_iter() - { - let matrix = matrix_neighbors(our_index, len); - let mut row_result: Vec<_> = matrix.row_neighbors.collect(); - let mut column_result: Vec<_> = matrix.column_neighbors.collect(); - row_result.sort(); - column_result.sort(); - - assert_eq!(row_result, expected_row); - assert_eq!(column_result, expected_column); - } -} diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 73de9cfc25b1..100ef66957bd 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -30,7 +30,7 @@ //! use crate::PeerId; -use polkadot_primitives::v2::{SessionIndex, ValidatorIndex}; +use polkadot_primitives::v2::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; use rand::{CryptoRng, Rng}; use std::{ collections::{hash_map, HashMap, HashSet}, @@ -48,9 +48,106 @@ pub const DEFAULT_RANDOM_SAMPLE_RATE: usize = crate::MIN_GOSSIP_PEERS; /// The number of peers to randomly propagate messages to. pub const DEFAULT_RANDOM_CIRCULATION: usize = 4; -/// Topology representation -#[derive(Default, Clone, Debug)] +/// Information about a peer in the gossip topology for a session. 
+#[derive(Debug, Clone, PartialEq)] +pub struct TopologyPeerInfo { + /// The validator's known peer IDs. + pub peer_ids: Vec, + /// The index of the validator in the discovery keys of the corresponding + /// `SessionInfo`. This can extend _beyond_ the set of active parachain validators. + pub validator_index: ValidatorIndex, + /// The authority discovery public key of the validator in the corresponding + /// `SessionInfo`. + pub discovery_id: AuthorityDiscoveryId, +} + +/// Topology representation for a session. +#[derive(Default, Clone, Debug, PartialEq)] pub struct SessionGridTopology { + /// An array mapping validator indices to their indices in the + /// shuffling itself. This has the same size as the number of validators + /// in the session. + shuffled_indices: Vec, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec, +} + +impl SessionGridTopology { + /// Create a new session grid topology. + pub fn new(shuffled_indices: Vec, canonical_shuffling: Vec) -> Self { + SessionGridTopology { shuffled_indices, canonical_shuffling } + } + + /// Produces the outgoing routing logic for a particular peer. + /// + /// Returns `None` if the validator index is out of bounds. + pub fn compute_grid_neighbors_for(&self, v: ValidatorIndex) -> Option { + if self.shuffled_indices.len() != self.canonical_shuffling.len() { + return None + } + let shuffled_val_index = *self.shuffled_indices.get(v.0 as usize)?; + + let neighbors = matrix_neighbors(shuffled_val_index, self.shuffled_indices.len())?; + + let mut grid_subset = GridNeighbors::empty(); + for r_n in neighbors.row_neighbors { + let n = &self.canonical_shuffling[r_n]; + grid_subset.validator_indices_x.insert(n.validator_index); + for p in &n.peer_ids { + grid_subset.peers_x.insert(p.clone()); + } + } + + for c_n in neighbors.column_neighbors { + let n = &self.canonical_shuffling[c_n]; + grid_subset.validator_indices_y.insert(n.validator_index); + for p in &n.peer_ids { + grid_subset.peers_y.insert(p.clone()); + } + } + + Some(grid_subset) + } +} + +struct MatrixNeighbors { + row_neighbors: R, + column_neighbors: C, +} + +/// Compute the row and column neighbors of `val_index` in a matrix +fn matrix_neighbors( + val_index: usize, + len: usize, +) -> Option, impl Iterator>> { + if val_index >= len { + return None + } + + // e.g. for size 11 the matrix would be + // + // 0 1 2 + // 3 4 5 + // 6 7 8 + // 9 10 + // + // and for index 10, the neighbors would be 1, 4, 7, 9 + + let sqrt = (len as f64).sqrt() as usize; + let our_row = val_index / sqrt; + let our_column = val_index % sqrt; + let row_neighbors = our_row * sqrt..std::cmp::min(our_row * sqrt + sqrt, len); + let column_neighbors = (our_column..len).step_by(sqrt); + + Some(MatrixNeighbors { + row_neighbors: row_neighbors.filter(move |i| *i != val_index), + column_neighbors: column_neighbors.filter(move |i| *i != val_index), + }) +} + +/// Information about the grid neighbors for a particular node in the topology. +#[derive(Debug, Clone, PartialEq)] +pub struct GridNeighbors { /// Represent peers in the X axis pub peers_x: HashSet, /// Represent validators in the X axis @@ -61,7 +158,18 @@ pub struct SessionGridTopology { pub validator_indices_y: HashSet, } -impl SessionGridTopology { +impl GridNeighbors { + /// Utility function for creating an empty set of grid neighbors. + /// Useful for testing. 
+ pub fn empty() -> Self { + GridNeighbors { + peers_x: HashSet::new(), + validator_indices_x: HashSet::new(), + peers_y: HashSet::new(), + validator_indices_y: HashSet::new(), + } + } + /// Given the originator of a message as a validator index, indicates the part of the topology /// we're meant to send the message to. pub fn required_routing_by_index( @@ -123,7 +231,7 @@ impl SessionGridTopology { } /// Returns the difference between this and the `other` topology as a vector of peers - pub fn peers_diff(&self, other: &SessionGridTopology) -> Vec { + pub fn peers_diff(&self, other: &Self) -> Vec { self.peers_x .iter() .chain(self.peers_y.iter()) @@ -138,15 +246,39 @@ impl SessionGridTopology { } } +/// An entry tracking a session grid topology and some cached local neighbors. +#[derive(Debug)] +pub struct SessionGridTopologyEntry { + topology: SessionGridTopology, + local_neighbors: GridNeighbors, +} + +impl SessionGridTopologyEntry { + /// Access the local grid neighbors. + pub fn local_grid_neighbors(&self) -> &GridNeighbors { + &self.local_neighbors + } + + /// Access the local grid neighbors mutably. + pub fn local_grid_neighbors_mut(&mut self) -> &mut GridNeighbors { + &mut self.local_neighbors + } + + /// Access the underlying topology. + pub fn get(&self) -> &SessionGridTopology { + &self.topology + } +} + /// A set of topologies indexed by session #[derive(Default)] pub struct SessionGridTopologies { - inner: HashMap, usize)>, + inner: HashMap, usize)>, } impl SessionGridTopologies { /// Returns a topology for the specific session index - pub fn get_topology(&self, session: SessionIndex) -> Option<&SessionGridTopology> { + pub fn get_topology(&self, session: SessionIndex) -> Option<&SessionGridTopologyEntry> { self.inner.get(&session).and_then(|val| val.0.as_ref()) } @@ -166,63 +298,112 @@ impl SessionGridTopologies { } /// Insert a new topology, no-op if already present. - pub fn insert_topology(&mut self, session: SessionIndex, topology: SessionGridTopology) { + pub fn insert_topology( + &mut self, + session: SessionIndex, + topology: SessionGridTopology, + local_index: Option, + ) { let entry = self.inner.entry(session).or_insert((None, 0)); if entry.0.is_none() { - entry.0 = Some(topology); + let local_neighbors = local_index + .and_then(|l| topology.compute_grid_neighbors_for(l)) + .unwrap_or_else(GridNeighbors::empty); + + entry.0 = Some(SessionGridTopologyEntry { topology, local_neighbors }); } } } /// A simple storage for a topology and the corresponding session index -#[derive(Default, Debug)] -pub struct GridTopologySessionBound { - topology: SessionGridTopology, +#[derive(Debug)] +struct GridTopologySessionBound { + entry: SessionGridTopologyEntry, session_index: SessionIndex, } /// A storage for the current and maybe previous topology -#[derive(Default, Debug)] +#[derive(Debug)] pub struct SessionBoundGridTopologyStorage { current_topology: GridTopologySessionBound, prev_topology: Option, } +impl Default for SessionBoundGridTopologyStorage { + fn default() -> Self { + // having this struct be `Default` is objectively stupid + // but used in a few places + SessionBoundGridTopologyStorage { + current_topology: GridTopologySessionBound { + // session 0 is valid so we should use the upper bound + // as the default instead of the lower bound. 
+ session_index: SessionIndex::max_value(), + entry: SessionGridTopologyEntry { + topology: SessionGridTopology { + shuffled_indices: Vec::new(), + canonical_shuffling: Vec::new(), + }, + local_neighbors: GridNeighbors::empty(), + }, + }, + prev_topology: None, + } + } +} + impl SessionBoundGridTopologyStorage { /// Return a grid topology based on the session index: /// If we need a previous session and it is registered in the storage, then return that session. /// Otherwise, return a current session to have some grid topology in any case - pub fn get_topology_or_fallback(&self, idx: SessionIndex) -> &SessionGridTopology { - self.get_topology(idx).unwrap_or(&self.current_topology.topology) + pub fn get_topology_or_fallback(&self, idx: SessionIndex) -> &SessionGridTopologyEntry { + self.get_topology(idx).unwrap_or(&self.current_topology.entry) } /// Return the grid topology for the specific session index, if no such a session is stored /// returns `None`. - pub fn get_topology(&self, idx: SessionIndex) -> Option<&SessionGridTopology> { + pub fn get_topology(&self, idx: SessionIndex) -> Option<&SessionGridTopologyEntry> { if let Some(prev_topology) = &self.prev_topology { if idx == prev_topology.session_index { - return Some(&prev_topology.topology) + return Some(&prev_topology.entry) } } if self.current_topology.session_index == idx { - return Some(&self.current_topology.topology) + return Some(&self.current_topology.entry) } None } /// Update the current topology preserving the previous one - pub fn update_topology(&mut self, session_index: SessionIndex, topology: SessionGridTopology) { + pub fn update_topology( + &mut self, + session_index: SessionIndex, + topology: SessionGridTopology, + local_index: Option, + ) { + let local_neighbors = local_index + .and_then(|l| topology.compute_grid_neighbors_for(l)) + .unwrap_or_else(GridNeighbors::empty); + let old_current = std::mem::replace( &mut self.current_topology, - GridTopologySessionBound { topology, session_index }, + GridTopologySessionBound { + entry: SessionGridTopologyEntry { topology, local_neighbors }, + session_index, + }, ); self.prev_topology.replace(old_current); } /// Returns a current grid topology - pub fn get_current_topology(&self) -> &SessionGridTopology { - &self.current_topology.topology + pub fn get_current_topology(&self) -> &SessionGridTopologyEntry { + &self.current_topology.entry + } + + /// Access the current grid topology mutably. Dangerous and intended + /// to be used in tests. 
+ pub fn get_current_topology_mut(&mut self) -> &mut SessionGridTopologyEntry { + &mut self.current_topology.entry } } @@ -365,4 +546,27 @@ mod tests { let mut random_routing = RandomRouting { target: 10, sent: 0, sample_rate: 10 }; assert_eq!(run_random_routing(&mut random_routing, &mut rng, 10, 100), 10); } + + #[test] + fn test_matrix_neighbors() { + for (our_index, len, expected_row, expected_column) in vec![ + (0usize, 1usize, vec![], vec![]), + (1, 2, vec![], vec![0usize]), + (0, 9, vec![1, 2], vec![3, 6]), + (9, 10, vec![], vec![0, 3, 6]), + (10, 11, vec![9], vec![1, 4, 7]), + (7, 11, vec![6, 8], vec![1, 4, 10]), + ] + .into_iter() + { + let matrix = matrix_neighbors(our_index, len).unwrap(); + let mut row_result: Vec<_> = matrix.row_neighbors.collect(); + let mut column_result: Vec<_> = matrix.column_neighbors.collect(); + row_result.sort(); + column_result.sort(); + + assert_eq!(row_result, expected_row); + assert_eq!(column_result, expected_column); + } + } } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 274582420f5d..17ca5d8ea4ac 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -27,7 +27,7 @@ use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, + grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet}, request_response::{v1 as request_v1, IncomingRequestReceiver}, v1::{self as protocol_v1, StatementMetadata}, @@ -910,7 +910,10 @@ async fn circulate_statement_and_dependents( .with_candidate(statement.payload().candidate_hash()) .with_stage(jaeger::Stage::StatementDistribution); - let topology = topology_store.get_topology_or_fallback(active_head.session_index); + let topology = topology_store + .get_topology_or_fallback(active_head.session_index) + .local_grid_neighbors(); + // First circulate the statement directly to all peers needing it. // The borrow of `active_head` needs to encompass only this (Rust) statement. 
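The sqrt-grid arithmetic behind `matrix_neighbors` and `compute_grid_neighbors_for` above can be checked in isolation. The following is a self-contained sketch of the same row/column computation (the function name is illustrative, not part of the patch); the expected values match the `test_matrix_neighbors` cases added in `grid_topology.rs`:

```rust
/// Row/column neighbors of `index` when `len` items are laid out row-major
/// in a grid of width floor(sqrt(len)), mirroring `matrix_neighbors` above.
fn grid_neighbors_of(index: usize, len: usize) -> Option<(Vec<usize>, Vec<usize>)> {
    if index >= len {
        return None
    }
    let width = (len as f64).sqrt() as usize;
    let row_start = (index / width) * width;
    let row = (row_start..(row_start + width).min(len))
        .filter(|&i| i != index)
        .collect();
    let column = ((index % width)..len)
        .step_by(width)
        .filter(|&i| i != index)
        .collect();
    Some((row, column))
}

fn main() {
    // For 11 validators the grid is 3 wide (indices 0-2, 3-5, 6-8, 9-10), so
    // index 10 sees row neighbor 9 and column neighbors 1, 4, 7.
    assert_eq!(grid_neighbors_of(10, 11), Some((vec![9], vec![1, 4, 7])));
    assert_eq!(grid_neighbors_of(0, 9), Some((vec![1, 2], vec![3, 6])));
    assert_eq!(grid_neighbors_of(11, 11), None); // out-of-bounds index
}
```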
let outputs: Option<(CandidateHash, Vec)> = { @@ -1009,7 +1012,7 @@ fn is_statement_large(statement: &SignedFullStatement) -> (bool, Option) #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn circulate_statement<'a, Context>( required_routing: RequiredRouting, - topology: &SessionGridTopology, + topology: &GridNeighbors, peers: &mut HashMap, ctx: &mut Context, relay_parent: Hash, @@ -1352,7 +1355,8 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>( let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await; let topology = match session_index { - Ok(session_index) => topology_storage.get_topology_or_fallback(session_index), + Ok(session_index) => + topology_storage.get_topology_or_fallback(session_index).local_grid_neighbors(), Err(e) => { gum::debug!( target: LOG_TARGET, @@ -1361,7 +1365,7 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>( e ); - topology_storage.get_current_topology() + topology_storage.get_current_topology().local_grid_neighbors() }, }; let required_routing = @@ -1588,7 +1592,7 @@ async fn handle_incoming_message<'a, Context>( #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] async fn update_peer_view_and_maybe_send_unlocked( peer: PeerId, - topology: &SessionGridTopology, + topology: &GridNeighbors, peer_data: &mut PeerData, ctx: &mut Context, active_heads: &HashMap, @@ -1673,16 +1677,22 @@ async fn handle_network_update( let _ = metrics.time_network_bridge_update_v1("new_gossip_topology"); let new_session_index = topology.session; - let new_topology: SessionGridTopology = topology.into(); - let old_topology = topology_storage.get_current_topology(); - let newly_added = new_topology.peers_diff(old_topology); - topology_storage.update_topology(new_session_index, new_topology); + let new_topology = topology.topology; + let old_topology = + topology_storage.get_current_topology().local_grid_neighbors().clone(); + topology_storage.update_topology(new_session_index, new_topology, topology.local_index); + + let newly_added = topology_storage + .get_current_topology() + .local_grid_neighbors() + .peers_diff(&old_topology); + for peer in newly_added { if let Some(data) = peers.get_mut(&peer) { let view = std::mem::take(&mut data.view); update_peer_view_and_maybe_send_unlocked( peer, - topology_storage.get_current_topology(), + topology_storage.get_current_topology().local_grid_neighbors(), data, ctx, &*active_heads, @@ -1717,7 +1727,7 @@ async fn handle_network_update( Some(data) => update_peer_view_and_maybe_send_unlocked( peer, - topology_storage.get_current_topology(), + topology_storage.get_current_topology().local_grid_neighbors(), data, ctx, &*active_heads, diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 3304ad86fcd5..f3b9db00aef4 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -20,6 +20,7 @@ use futures::executor::{self, block_on}; use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ + grid_topology::{SessionGridTopology, TopologyPeerInfo}, peer_set::ValidationVersion, request_response::{ v1::{StatementFetchingRequest, StatementFetchingResponse}, @@ -509,7 +510,7 @@ fn peer_view_update_sends_messages() { let peer = PeerId::random(); executor::block_on(async move { - let mut topology: SessionGridTopology = Default::default(); + let mut topology = 
GridNeighbors::empty(); topology.peers_x = HashSet::from_iter(vec![peer.clone()].into_iter()); update_peer_view_and_maybe_send_unlocked( peer.clone(), @@ -639,7 +640,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { }; let statement = StoredStatement { comparator: &comparator, statement: &statement }; - let mut topology: SessionGridTopology = Default::default(); + let mut topology = GridNeighbors::empty(); topology.peers_x = HashSet::from_iter(vec![peer_a.clone(), peer_b.clone(), peer_c.clone()].into_iter()); let needs_dependents = circulate_statement( @@ -2019,42 +2020,77 @@ fn handle_multiple_seconded_statements() { .await; } - // Explicitly add all `lucky` peers to the gossip peers to ensure that neither `peerA` not `peerB` - // receive statements + // Set up a topology which puts peers a & b in a column together. let gossip_topology = { - let mut t = network_bridge_event::NewGossipTopology { - session: 1, - our_neighbors_x: HashMap::new(), - our_neighbors_y: HashMap::new(), - }; - - // Create a topology to ensure that we send messages not to `peer_a`/`peer_b` - for (i, peer) in lucky_peers.iter().enumerate() { - let authority_id = AuthorityPair::generate().0.public(); - t.our_neighbors_y.insert( - authority_id, - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![peer.clone()], - validator_index: (i as u32 + 2_u32).into(), - }, - ); + // create a lucky_peers+1 * lucky_peers+1 grid topology where we are at index 2, sharing + // a row with peer_a (0) and peer_b (1) and a column with all the lucky peers. + // the rest is filled with junk. + // This is an absolute garbage hack depending on quirks of the implementation + // and not on sound architecture. + + let n_lucky = lucky_peers.len(); + let dim = n_lucky + 1; + let grid_size = dim * dim; + let topology_peer_info: Vec<_> = (0..grid_size) + .map(|i| { + if i == 0 { + TopologyPeerInfo { + peer_ids: vec![peer_a.clone()], + validator_index: ValidatorIndex(0), + discovery_id: AuthorityPair::generate().0.public(), + } + } else if i == 1 { + TopologyPeerInfo { + peer_ids: vec![peer_b.clone()], + validator_index: ValidatorIndex(1), + discovery_id: AuthorityPair::generate().0.public(), + } + } else if i == 2 { + TopologyPeerInfo { + peer_ids: vec![], + validator_index: ValidatorIndex(2), + discovery_id: AuthorityPair::generate().0.public(), + } + } else if (i - 2) % dim == 0 { + let lucky_index = ((i - 2) / dim) - 1; + TopologyPeerInfo { + peer_ids: vec![lucky_peers[lucky_index].clone()], + validator_index: ValidatorIndex(i as _), + discovery_id: AuthorityPair::generate().0.public(), + } + } else { + TopologyPeerInfo { + peer_ids: vec![PeerId::random()], + validator_index: ValidatorIndex(i as _), + discovery_id: AuthorityPair::generate().0.public(), + } + } + }) + .collect(); + + // also a hack: this is only required to be accurate for + // the validator indices we compute grid neighbors for. + let mut shuffled_indices = vec![0; grid_size]; + shuffled_indices[2] = 2; + + // Some sanity checking to make sure this hack is set up correctly. 
+ let topology = SessionGridTopology::new(shuffled_indices, topology_peer_info); + let grid_neighbors = topology.compute_grid_neighbors_for(ValidatorIndex(2)).unwrap(); + assert_eq!(grid_neighbors.peers_x.len(), 25); + assert!(grid_neighbors.peers_x.contains(&peer_a)); + assert!(grid_neighbors.peers_x.contains(&peer_b)); + assert!(!grid_neighbors.peers_y.contains(&peer_b)); + assert!(!grid_neighbors.route_to_peer(RequiredRouting::GridY, &peer_b)); + assert_eq!(grid_neighbors.peers_y.len(), lucky_peers.len()); + for lucky in &lucky_peers { + assert!(grid_neighbors.peers_y.contains(lucky)); } - t.our_neighbors_x.insert( - AuthorityPair::generate().0.public(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![peer_a.clone()], - validator_index: 0_u32.into(), - }, - ); - t.our_neighbors_x.insert( - AuthorityPair::generate().0.public(), - network_bridge_event::TopologyPeerInfo { - peer_ids: vec![peer_b.clone()], - validator_index: 1_u32.into(), - }, - ); - t + network_bridge_event::NewGossipTopology { + session: 1, + topology, + local_index: Some(ValidatorIndex(2)), + } }; handle diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 121c707c2541..dee4c7cbbba9 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -873,8 +873,9 @@ fn test_network_bridge_tx_msg() -> NetworkBridgeTxMessage { fn test_network_bridge_rx_msg() -> NetworkBridgeRxMessage { NetworkBridgeRxMessage::NewGossipTopology { session: SessionIndex::from(0_u32), - our_neighbors_x: HashMap::new(), - our_neighbors_y: HashMap::new(), + local_index: None, + canonical_shuffling: Vec::new(), + shuffled_indices: Vec::new(), } } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index a1520a9aeba8..6e4983813984 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -328,18 +328,13 @@ pub enum NetworkBridgeRxMessage { NewGossipTopology { /// The session info this gossip topology is concerned with. session: SessionIndex, - /// Ids of our neighbors in the X dimensions of the new gossip topology, - /// along with their validator indices within the session. - /// - /// We're not necessarily connected to all of them, but we should - /// try to be. - our_neighbors_x: HashMap, - /// Ids of our neighbors in the X dimensions of the new gossip topology, - /// along with their validator indices within the session. - /// - /// We're not necessarily connected to all of them, but we should - /// try to be. - our_neighbors_y: HashMap, + /// Our validator index in the session, if any. + local_index: Option, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec<(AuthorityDiscoveryId, ValidatorIndex)>, + /// The reverse mapping of `canonical_shuffling`: from validator index + /// to the index in `canonical_shuffling` + shuffled_indices: Vec, }, } diff --git a/node/subsystem-types/src/messages/network_bridge_event.rs b/node/subsystem-types/src/messages/network_bridge_event.rs index cd0bb9894b6b..5abad8a3c22c 100644 --- a/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/node/subsystem-types/src/messages/network_bridge_event.rs @@ -14,10 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
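`NewGossipTopology` now carries the whole session topology: `canonical_shuffling` (who occupies each grid position, paired with an `AuthorityDiscoveryId` in the real type) and its reverse mapping `shuffled_indices`, built in `update_gossip_topology` above. A minimal, self-contained sketch of that mapping, using plain `usize` indices in place of `ValidatorIndex` and a fixed permutation instead of the BABE-seeded shuffle:

```rust
fn main() {
    // canonical_shuffling[pos] = validator occupying grid position `pos`.
    let canonical_shuffling: Vec<usize> = vec![2, 0, 3, 1];

    // shuffled_indices[validator] = that validator's position in the shuffling,
    // i.e. the reverse mapping constructed in `update_gossip_topology`.
    let mut shuffled_indices = vec![0usize; canonical_shuffling.len()];
    for (pos, &validator) in canonical_shuffling.iter().enumerate() {
        shuffled_indices[validator] = pos;
    }
    assert_eq!(shuffled_indices, vec![1, 3, 0, 2]);

    // Round-trip invariant that `compute_grid_neighbors_for` relies on.
    for v in 0..canonical_shuffling.len() {
        assert_eq!(canonical_shuffling[shuffled_indices[v]], v);
    }
}
```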
-use std::{ - collections::{HashMap, HashSet}, - convert::TryFrom, -}; +use std::{collections::HashSet, convert::TryFrom}; pub use sc_network::{PeerId, ReputationChange}; @@ -27,25 +24,15 @@ use polkadot_node_network_protocol::{ }; use polkadot_primitives::v2::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; -/// Information about a peer in the gossip topology for a session. -#[derive(Debug, Clone, PartialEq)] -pub struct TopologyPeerInfo { - /// The validator's known peer IDs. - pub peer_ids: Vec, - /// The index of the validator in the discovery keys of the corresponding - /// `SessionInfo`. This can extend _beyond_ the set of active parachain validators. - pub validator_index: ValidatorIndex, -} - /// A struct indicating new gossip topology. #[derive(Debug, Clone, PartialEq)] pub struct NewGossipTopology { /// The session index this topology corresponds to. pub session: SessionIndex, - /// Neighbors in the 'X' dimension of the grid. - pub our_neighbors_x: HashMap, - /// Neighbors in the 'Y' dimension of the grid. - pub our_neighbors_y: HashMap, + /// The topology itself. + pub topology: SessionGridTopology, + /// The local validator index, if any. + pub local_index: Option, } /// Events from network. @@ -122,19 +109,3 @@ impl NetworkBridgeEvent { }) } } - -impl From for SessionGridTopology { - fn from(topology: NewGossipTopology) -> Self { - let peers_x = - topology.our_neighbors_x.values().flat_map(|p| &p.peer_ids).cloned().collect(); - let peers_y = - topology.our_neighbors_y.values().flat_map(|p| &p.peer_ids).cloned().collect(); - - let validator_indices_x = - topology.our_neighbors_x.values().map(|p| p.validator_index.clone()).collect(); - let validator_indices_y = - topology.our_neighbors_y.values().map(|p| p.validator_index.clone()).collect(); - - SessionGridTopology { peers_x, peers_y, validator_indices_x, validator_indices_y } - } -} diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 78fee0d5bd6d..09219c3258b9 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,7 +15,7 @@ path = "bin/puppet_worker.rs" [dependencies] parity-scale-codec = { version = "3.1.5", default-features = false, features = ["derive"] } -clap = { version = "3.1", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" diff --git a/parachain/test-parachains/adder/collator/src/cli.rs b/parachain/test-parachains/adder/collator/src/cli.rs index 8e55a7821f3d..a71837c4663a 100644 --- a/parachain/test-parachains/adder/collator/src/cli.rs +++ b/parachain/test-parachains/adder/collator/src/cli.rs @@ -23,11 +23,11 @@ use sc_cli::{RuntimeVersion, SubstrateCli}; #[derive(Debug, Parser)] pub enum Subcommand { /// Export the genesis state of the parachain. - #[clap(name = "export-genesis-state")] + #[command(name = "export-genesis-state")] ExportGenesisState(ExportGenesisStateCommand), /// Export the genesis wasm of the parachain. - #[clap(name = "export-genesis-wasm")] + #[command(name = "export-genesis-wasm")] ExportGenesisWasm(ExportGenesisWasmCommand), } @@ -41,20 +41,21 @@ pub struct ExportGenesisWasmCommand {} #[allow(missing_docs)] #[derive(Debug, Parser)] +#[group(skip)] pub struct RunCmd { #[allow(missing_docs)] #[clap(flatten)] pub base: sc_cli::RunCmd, /// Id of the parachain this collator collates for. 
- #[clap(long)] + #[arg(long)] pub parachain_id: Option, } #[allow(missing_docs)] #[derive(Debug, Parser)] pub struct Cli { - #[clap(subcommand)] + #[command(subcommand)] pub subcommand: Option, #[clap(flatten)] diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index 8596f1c94587..b70de68bbdef 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,7 +15,7 @@ path = "bin/puppet_worker.rs" [dependencies] parity-scale-codec = { version = "3.1.5", default-features = false, features = ["derive"] } -clap = { version = "3.1", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } futures = "0.3.19" futures-timer = "3.0.2" log = "0.4.17" diff --git a/parachain/test-parachains/undying/collator/src/cli.rs b/parachain/test-parachains/undying/collator/src/cli.rs index 4f1ff06e7472..703a0bfd46f6 100644 --- a/parachain/test-parachains/undying/collator/src/cli.rs +++ b/parachain/test-parachains/undying/collator/src/cli.rs @@ -23,11 +23,11 @@ use sc_cli::{RuntimeVersion, SubstrateCli}; #[derive(Debug, Parser)] pub enum Subcommand { /// Export the genesis state of the parachain. - #[clap(name = "export-genesis-state")] + #[command(name = "export-genesis-state")] ExportGenesisState(ExportGenesisStateCommand), /// Export the genesis wasm of the parachain. - #[clap(name = "export-genesis-wasm")] + #[command(name = "export-genesis-wasm")] ExportGenesisWasm(ExportGenesisWasmCommand), } @@ -35,16 +35,16 @@ pub enum Subcommand { #[derive(Debug, Parser)] pub struct ExportGenesisStateCommand { /// Id of the parachain this collator collates for. - #[clap(long, default_value = "100")] + #[arg(long, default_value_t = 100)] pub parachain_id: u32, /// The target raw PoV size in bytes. Minimum value is 64. - #[clap(long, default_value = "1024")] + #[arg(long, default_value_t = 1024)] pub pov_size: usize, /// The PVF execution complexity. Actually specifies how many iterations/signatures /// we compute per block. - #[clap(long, default_value = "1")] + #[arg(long, default_value_t = 1)] pub pvf_complexity: u32, } @@ -54,29 +54,30 @@ pub struct ExportGenesisWasmCommand {} #[allow(missing_docs)] #[derive(Debug, Parser)] +#[group(skip)] pub struct RunCmd { #[allow(missing_docs)] #[clap(flatten)] pub base: sc_cli::RunCmd, /// Id of the parachain this collator collates for. - #[clap(long, default_value = "2000")] + #[arg(long, default_value_t = 2000)] pub parachain_id: u32, /// The target raw PoV size in bytes. Minimum value is 64. - #[clap(long, default_value = "1024")] + #[arg(long, default_value_t = 1024)] pub pov_size: usize, /// The PVF execution complexity. Actually specifies how many iterations/signatures /// we compute per block. 
- #[clap(long, default_value = "1")] + #[arg(long, default_value_t = 1)] pub pvf_complexity: u32, } #[allow(missing_docs)] #[derive(Debug, Parser)] pub struct Cli { - #[clap(subcommand)] + #[command(subcommand)] pub subcommand: Option, #[clap(flatten)] diff --git a/roadmap/implementers-guide/src/disputes-flow.md b/roadmap/implementers-guide/src/disputes-flow.md index 322af8e9256e..053bcfd3f2a4 100644 --- a/roadmap/implementers-guide/src/disputes-flow.md +++ b/roadmap/implementers-guide/src/disputes-flow.md @@ -96,7 +96,7 @@ Votes must be queryable by a particular validator, identified by its signing key Votes must be queryable by a particular validator, identified by a session index and the validator index valid in that session. -If there exists a negative and a positive fork for a particular block, a dispute is detected. +If there exists a negative and a positive vote for a particular block, a dispute is detected. If a dispute is detected, all currently available votes for that block must be gossiped. diff --git a/roadmap/implementers-guide/src/protocol-chain-selection.md b/roadmap/implementers-guide/src/protocol-chain-selection.md index 4f90a26949d8..dd066df43cdd 100644 --- a/roadmap/implementers-guide/src/protocol-chain-selection.md +++ b/roadmap/implementers-guide/src/protocol-chain-selection.md @@ -43,6 +43,6 @@ The leaf-selection rule based on our definitions above is simple: we take the ma Finality gadgets, as mentioned above, will often impose an additional requirement to vote on a chain containing a specific block, known as the **required** block. Although this is typically the most recently finalized block, it is possible that it may be a block that is unfinalized. When receiving such a request: 1. If the required block is the best finalized block, then select the best viable leaf. 2. If the required block is unfinalized and non-viable, then select the required block and go no further. This is likely an indication that something bad will be finalized in the network, which will never happen when approvals & disputes are functioning correctly. Nevertheless we account for the case here. -3. If the required block is unfinalized and non-viable, then iterate over the viable leaves in descending order by score and select the first one which contains the required block in its chain. Backwards iteration is a simple way to check this, but if unfinalized chains grow long then Merkle Mountain-Ranges will most likely be more efficient. +3. If the required block is unfinalized and viable, then iterate over the viable leaves in descending order by score and select the first one which contains the required block in its chain. Backwards iteration is a simple way to check this, but if unfinalized chains grow long then Merkle Mountain-Ranges will most likely be more efficient. Once selecting a leaf, the chain should be constrained to the maximum of the required block or the highest **finalizable** ancestor. diff --git a/roadmap/implementers-guide/src/pvf-prechecking.md b/roadmap/implementers-guide/src/pvf-prechecking.md index 0daeaf0593c7..1dc7611c0cef 100644 --- a/roadmap/implementers-guide/src/pvf-prechecking.md +++ b/roadmap/implementers-guide/src/pvf-prechecking.md @@ -1,12 +1,34 @@ # PVF Pre-checking Overview -> ⚠️ This discusses a mechanism that is currently not under-development. Follow the progress under [#3211]. +> ⚠️ This discusses a mechanism that is currently under-development. Follow the progress under [#3211]. 
+ +## Terms + +This functionality involves several processes which may be potentially +confusing: + +- **Prechecking:** This is the process of initially checking the PVF when it is + first added. We attempt *preparation* of the PVF and make sure it succeeds + within a given timeout. +- **Execution:** This actually executes the PVF. The node may not have the + artifact from prechecking, in which case this process also includes a + *preparation* job. The timeout for preparation here is more lenient than when + prechecking. +- **Preparation:** This is the process of preparing the WASM blob and includes + both *prevalidation* and *compilation*. As prevalidation is pretty minimal + right now, preparation mostly consists of compilation. Note that *prechecking* + just consists of preparation, whereas *execution* will also prepare the PVF if + the artifact is not already found. +- **Prevalidation:** Right now this just tries to deserialize the binary with + parity-wasm. It is a part of *preparation*. +- **Compilation:** This is the process of compiling a PVF from wasm code to + machine code. It is a part of *preparation*. ## Motivation Parachains' and parathreads' validation function is described by a wasm module that we refer to as a PVF. Since it's a wasm module the typical way of executing it is to compile it to machine code. Typically an optimizing compiler consists of algorithms that are able to optimize the resulting machine code heavily. However, while those algorithms perform quite well for a typical wasm code produced by standard toolchains (e.g. rustc/LLVM), those algorithms can be abused to consume a lot of resources. Moreover, since those algorithms are rather complex there is a lot of room for a bug that can crash the compiler. -If compilation of a Parachain Validation Function (PVF) takes too long or uses too much memory, this can leave a node in limbo as to whether a candidate of that parachain is valid or not. +If compilation of a Parachain Validation Function (PVF) takes too long or uses too much memory, this can leave a node in limbo as to whether a candidate of that parachain is valid or not. The amount of time that a PVF takes to compile is a subjective resource limit and as such PVFs may be maliciously crafted so that there is e.g. a 50/50 split of validators which can and cannot compile and execute the PVF. diff --git a/roadmap/implementers-guide/src/runtime/configuration.md b/roadmap/implementers-guide/src/runtime/configuration.md index 739352b202b3..96d63faccedd 100644 --- a/roadmap/implementers-guide/src/runtime/configuration.md +++ b/roadmap/implementers-guide/src/runtime/configuration.md @@ -28,7 +28,7 @@ The session change routine works as follows: ## Routines ```rust -enum InconsistentErrror { +enum InconsistentError { // ... } diff --git a/roadmap/implementers-guide/src/runtime/paras.md b/roadmap/implementers-guide/src/runtime/paras.md index af2e7add54e5..a9e99c8993bf 100644 --- a/roadmap/implementers-guide/src/runtime/paras.md +++ b/roadmap/implementers-guide/src/runtime/paras.md @@ -172,7 +172,7 @@ PastCodePruning: Vec<(ParaId, BlockNumber)>; /// The change will be applied after the first parablock for this ID included which executes /// in the context of a relay chain block with a number >= `expected_at`. FutureCodeUpgrades: map ParaId => Option; -/// The actual future code of a para. +/// Hash of the actual future code of a para. 
FutureCodeHash: map ParaId => Option; /// This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure. /// @@ -246,8 +246,8 @@ CodeByHash: map ValidationCodeHash => Option 1. Do pruning based on all entries in `PastCodePruning` with `BlockNumber <= now`. Update the corresponding `PastCodeMeta` and `PastCode` accordingly. 1. Toggle the upgrade related signals - 1. Collect all `(para_id, expected_at)` from `UpcomingUpgrades` where `expected_at <= now` and prune them. For each para pruned set `UpgradeGoAheadSignal` to `GoAhead`. - 1. Collect all `(para_id, next_possible_upgrade_at)` from `UpgradeCooldowns` where `next_possible_upgrade_at <= now` and prune them. For each para pruned set `UpgradeRestrictionSignal` to `Present`. + 1. Collect all `(para_id, expected_at)` from `UpcomingUpgrades` where `expected_at <= now` and prune them. For each para pruned set `UpgradeGoAheadSignal` to `GoAhead`. Reserve weight for the state modification to upgrade each para pruned. + 1. Collect all `(para_id, next_possible_upgrade_at)` from `UpgradeCooldowns` where `next_possible_upgrade_at <= now`. For each para obtained this way reserve weight to remove its `UpgradeRestrictionSignal` on finalization. ## Routines @@ -275,4 +275,4 @@ In case the PVF pre-checking is enabled, or the new code is not already present ## Finalization -No finalization routine runs for this module. +Collect all `(para_id, next_possible_upgrade_at)` from `UpgradeCooldowns` where `next_possible_upgrade_at <= now` and prune them. For each para pruned remove its `UpgradeRestrictionSignal`. diff --git a/roadmap/implementers-guide/src/types/network.md b/roadmap/implementers-guide/src/types/network.md index 0d09a682cff2..b698ca2075bf 100644 --- a/roadmap/implementers-guide/src/types/network.md +++ b/roadmap/implementers-guide/src/types/network.md @@ -145,10 +145,19 @@ These updates are posted from the [Network Bridge Subsystem](../node/utility/net struct NewGossipTopology { /// The session index this topology corresponds to. session: SessionIndex, - /// Neighbors in the 'X' dimension of the grid. - our_neighbors_x: HashMap, - /// Neighbors in the 'Y' dimension of the grid. - our_neighbors_y: HashMap, + /// The topology itself. + topology: SessionGridTopology, + /// The local validator index, if any. + local_index: Option, +} + +struct SessionGridTopology { + /// An array mapping validator indices to their indices in the + /// shuffling itself. This has the same size as the number of validators + /// in the session. + shuffled_indices: Vec, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec, } struct TopologyPeerInfo { @@ -157,6 +166,9 @@ struct TopologyPeerInfo { /// The index of the validator in the discovery keys of the corresponding /// `SessionInfo`. This can extend _beyond_ the set of active parachain validators. validator_index: ValidatorIndex, + /// The authority discovery public key of the validator in the corresponding + /// `SessionInfo`. + discovery_id: AuthorityDiscoveryId, } enum NetworkBridgeEvent { diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index b2559c4cfda7..4b9dc97c27e2 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -555,14 +555,15 @@ enum NetworkBridgeMessage { /// Inform the distribution subsystems about the new /// gossip network topology formed. 
NewGossipTopology { - /// The session this topology corresponds to. - session: SessionIndex, - /// Ids of our neighbors in the X dimension of the new gossip topology. - /// We're not necessarily connected to all of them, but we should try to be. - our_neighbors_x: HashSet, - /// Ids of our neighbors in the Y dimension of the new gossip topology. - /// We're not necessarily connected to all of them, but we should try to be. - our_neighbors_y: HashSet, + /// The session info this gossip topology is concerned with. + session: SessionIndex, + /// Our validator index in the session, if any. + local_index: Option, + /// The canonical shuffling of validators for the session. + canonical_shuffling: Vec<(AuthorityDiscoveryId, ValidatorIndex)>, + /// The reverse mapping of `canonical_shuffling`: from validator index + /// to the index in `canonical_shuffling` + shuffled_indices: Vec, } } ``` diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index fa2c61f17c25..2b3497832caa 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -108,7 +108,11 @@ where + Sync + 'static, C::Api: frame_rpc_system::AccountNonceApi, - C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, + C::Api: pallet_mmr_rpc::MmrRuntimeApi< + Block, + ::Hash, + BlockNumber, + >, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index d5bc59599715..7245cd92d304 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -60,6 +60,8 @@ pub trait WeightInfo { fn force_register() -> Weight; fn deregister() -> Weight; fn swap() -> Weight; + fn schedule_code_upgrade(b: u32) -> Weight; + fn set_current_head(b: u32) -> Weight; } pub struct TestWeightInfo; @@ -79,6 +81,12 @@ impl WeightInfo for TestWeightInfo { fn swap() -> Weight { Weight::zero() } + fn schedule_code_upgrade(_b: u32) -> Weight { + Weight::zero() + } + fn set_current_head(_b: u32) -> Weight { + Weight::zero() + } } #[frame_support::pallet] @@ -318,11 +326,11 @@ pub mod pallet { /// Remove a manager lock from a para. This will allow the manager of a /// previously locked para to deregister or swap a para without using governance. /// - /// Can only be called by the Root origin. + /// Can only be called by the Root origin or the parachain. #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn force_remove_lock(origin: OriginFor, para: ParaId) -> DispatchResult { - ensure_root(origin)?; - Self::remove_lock(para); + pub fn remove_lock(origin: OriginFor, para: ParaId) -> DispatchResult { + Self::ensure_root_or_para(origin, para)?; + ::remove_lock(para); Ok(()) } @@ -348,6 +356,45 @@ pub mod pallet { NextFreeParaId::::set(id + 1); Ok(()) } + + /// Add a manager lock from a para. This will prevent the manager of a + /// para to deregister or swap a para. + /// + /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn add_lock(origin: OriginFor, para: ParaId) -> DispatchResult { + Self::ensure_root_para_or_owner(origin, para)?; + ::apply_lock(para); + Ok(()) + } + + /// Schedule a parachain upgrade. + /// + /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. 
+ #[pallet::weight(::WeightInfo::schedule_code_upgrade(new_code.0.len() as u32))] + pub fn schedule_code_upgrade( + origin: OriginFor, + para: ParaId, + new_code: ValidationCode, + ) -> DispatchResult { + Self::ensure_root_para_or_owner(origin, para)?; + runtime_parachains::schedule_code_upgrade::(para, new_code)?; + Ok(()) + } + + /// Set the parachain's current head. + /// + /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + #[pallet::weight(::WeightInfo::set_current_head(new_head.0.len() as u32))] + pub fn set_current_head( + origin: OriginFor, + para: ParaId, + new_head: HeadData, + ) -> DispatchResult { + Self::ensure_root_para_or_owner(origin, para)?; + runtime_parachains::set_current_head::(para, new_head); + Ok(()) + } } } @@ -379,7 +426,7 @@ impl Registrar for Pallet { Paras::::mutate(id, |x| x.as_mut().map(|mut info| info.locked = true)); } - // Apply a lock to the parachain. + // Remove a lock from the parachain. fn remove_lock(id: ParaId) { Paras::::mutate(id, |x| x.as_mut().map(|mut info| info.locked = false)); } @@ -467,17 +514,23 @@ impl Pallet { ensure!(para_info.manager == who, Error::::NotOwner); Ok(()) }) - .or_else(|_| -> DispatchResult { - // Else check if para origin... - let caller_id = - ensure_parachain(::RuntimeOrigin::from(origin.clone()))?; - ensure!(caller_id == id, Error::::NotOwner); - Ok(()) - }) - .or_else(|_| -> DispatchResult { - // Check if root... - ensure_root(origin.clone()).map_err(|e| e.into()) - }) + .or_else(|_| -> DispatchResult { Self::ensure_root_or_para(origin, id) }) + } + + /// Ensure the origin is one of Root or the `para` itself. + fn ensure_root_or_para( + origin: ::RuntimeOrigin, + id: ParaId, + ) -> DispatchResult { + if let Ok(caller_id) = ensure_parachain(::RuntimeOrigin::from(origin.clone())) + { + // Check if matching para id... + ensure!(caller_id == id, Error::::NotOwner); + } else { + // Check if root... + ensure_root(origin.clone())?; + } + Ok(()) } fn do_reserve( @@ -1087,21 +1140,20 @@ mod tests { vec![1, 2, 3].into(), )); - // Owner can call swap - assert_ok!(Registrar::swap(RuntimeOrigin::signed(1), para_id, para_id + 1)); - - // 2 session changes to fully onboard. - run_to_session(2); - assert_eq!(Parachains::lifecycle(para_id), Some(ParaLifecycle::Parathread)); - + assert_noop!(Registrar::add_lock(RuntimeOrigin::signed(2), para_id), BadOrigin); // Once they begin onboarding, we lock them in. - assert_ok!(Registrar::make_parachain(para_id)); - - // Owner cannot call swap anymore + assert_ok!(Registrar::add_lock(RuntimeOrigin::signed(1), para_id)); + // Owner cannot pass origin check when checking lock assert_noop!( - Registrar::swap(RuntimeOrigin::signed(1), para_id, para_id + 2), + Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id), BadOrigin ); + // Owner cannot remove lock. + assert_noop!(Registrar::remove_lock(RuntimeOrigin::signed(1), para_id), BadOrigin); + // Para can. 
+ assert_ok!(Registrar::remove_lock(para_origin(para_id), para_id)); + // Owner can pass origin check again + assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); }); } @@ -1227,6 +1279,7 @@ mod benchmarking { use crate::traits::Registrar as RegistrarT; use frame_support::assert_ok; use frame_system::RawOrigin; + use primitives::v2::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE}; use runtime_parachains::{paras, shared, Origin as ParaOrigin}; use sp_runtime::traits::Bounded; @@ -1343,6 +1396,18 @@ mod benchmarking { assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parachain)); } + schedule_code_upgrade { + let b in 1 .. MAX_CODE_SIZE; + let new_code = ValidationCode(vec![0; b as usize]); + let para_id = ParaId::from(1000); + }: _(RawOrigin::Root, para_id, new_code) + + set_current_head { + let b in 1 .. MAX_HEAD_DATA_SIZE; + let new_head = HeadData(vec![0; b as usize]); + let para_id = ParaId::from(1000); + }: _(RawOrigin::Root, para_id, new_head) + impl_benchmark_test_suite!( Registrar, crate::integration_tests::new_test_ext(), diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 58a824ae1cc2..83651f7164b9 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1478,6 +1478,7 @@ pub type Executive = frame_executive::Executive< pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" parachains_configuration::migration::v3::MigrateToV3, + pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, ), >; /// The payload being signed in the transactions. @@ -1714,8 +1715,8 @@ sp_api::impl_runtime_apis! { } } - impl mmr::MmrApi for Runtime { - fn generate_proof(_leaf_index: u64) + impl mmr::MmrApi for Runtime { + fn generate_proof(_block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -1739,15 +1740,15 @@ sp_api::impl_runtime_apis! 
{ Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_leaf_indices: Vec) + fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } fn generate_historical_batch_proof( - _leaf_indices: Vec, - _leaves_count: u64, + _block_numbers: Vec, + _best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/kusama/src/weights/runtime_common_paras_registrar.rs b/runtime/kusama/src/weights/runtime_common_paras_registrar.rs index 6e8b878ad196..8b9f554fe3e6 100644 --- a/runtime/kusama/src/weights/runtime_common_paras_registrar.rs +++ b/runtime/kusama/src/weights/runtime_common_paras_registrar.rs @@ -103,4 +103,28 @@ impl runtime_common::paras_registrar::WeightInfo for We .saturating_add(T::DbWeight::get().reads(10 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } + // Storage: Paras FutureCodeHash (r:1 w:1) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Paras UpgradeCooldowns (r:1 w:1) + // Storage: Paras PvfActiveVoteMap (r:1 w:0) + // Storage: Paras CodeByHash (r:1 w:1) + // Storage: Paras UpcomingUpgrades (r:1 w:1) + // Storage: System Digest (r:1 w:1) + // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:0 w:1) + // Storage: Paras UpgradeRestrictionSignal (r:0 w:1) + fn schedule_code_upgrade(b: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } + // Storage: Paras Heads (r:0 w:1) + fn set_current_head(b: u32, ) -> Weight { + Weight::from_ref_time(5_494_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } } diff --git a/runtime/kusama/src/weights/runtime_parachains_hrmp.rs b/runtime/kusama/src/weights/runtime_parachains_hrmp.rs index c7879ac8f17c..ab7a5b87291e 100644 --- a/runtime/kusama/src/weights/runtime_parachains_hrmp.rs +++ b/runtime/kusama/src/weights/runtime_parachains_hrmp.rs @@ -156,4 +156,19 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } + // Storage: Paras ParaLifecycles (r:2 w:0) + // Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) + // Storage: Hrmp HrmpChannels (r:1 w:0) + // Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) + // Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) + // Storage: Dmp DownwardMessageQueueHeads (r:2 w:2) + // Storage: Dmp DownwardMessageQueues (r:2 w:2) + // Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) + fn force_open_hrmp_channel() -> Weight { + Weight::from_ref_time(104_771_000 as u64) + .saturating_add(T::DbWeight::get().reads(13 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } } diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index af0cccb22512..53ad6781048f 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -58,6 +58,7 @@ pub trait WeightInfo { fn force_process_hrmp_close(c: u32) -> Weight; fn hrmp_cancel_open_request(c: u32) -> Weight; fn 
clean_open_channel_requests(c: u32) -> Weight; + fn force_open_hrmp_channel() -> Weight; } /// A weight info that is only suitable for testing. @@ -88,6 +89,9 @@ impl WeightInfo for TestWeightInfo { fn clean_open_channel_requests(_: u32) -> Weight { Weight::MAX } + fn force_open_hrmp_channel() -> Weight { + Weight::MAX + } } /// A description of a request to open an HRMP channel. @@ -269,6 +273,9 @@ pub mod pallet { OpenChannelAccepted(ParaId, ParaId), /// HRMP channel closed. `[by_parachain, channel_id]` ChannelClosed(ParaId, HrmpChannelId), + /// An HRMP channel was opened via Root origin. + /// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]` + HrmpChannelForceOpened(ParaId, ParaId, u32, u32), } #[pallet::error] @@ -577,6 +584,32 @@ pub mod pallet { Self::deposit_event(Event::OpenChannelCanceled(origin, channel_id)); Ok(()) } + + /// Open a channel from a `sender` to a `recipient` `ParaId` using the Root origin. Although + /// opened by Root, the `max_capacity` and `max_message_size` are still subject to the Relay + /// Chain's configured limits. + /// + /// Expected use is when one of the `ParaId`s involved in the channel is governed by the + /// Relay Chain, e.g. a common good parachain. + #[pallet::weight(::WeightInfo::force_open_hrmp_channel())] + pub fn force_open_hrmp_channel( + origin: OriginFor, + sender: ParaId, + recipient: ParaId, + max_capacity: u32, + max_message_size: u32, + ) -> DispatchResult { + ensure_root(origin)?; + Self::init_open_channel(sender, recipient, max_capacity, max_message_size)?; + Self::accept_open_channel(recipient, sender)?; + Self::deposit_event(Event::HrmpChannelForceOpened( + sender, + recipient, + max_capacity, + max_message_size, + )); + Ok(()) + } } } diff --git a/runtime/parachains/src/hrmp/benchmarking.rs b/runtime/parachains/src/hrmp/benchmarking.rs index 71ea684be179..7ea14b1dc922 100644 --- a/runtime/parachains/src/hrmp/benchmarking.rs +++ b/runtime/parachains/src/hrmp/benchmarking.rs @@ -296,6 +296,32 @@ frame_benchmarking::benchmarks! { } verify { assert_eq!(HrmpOpenChannelRequestsList::::decode_len().unwrap_or_default() as u32, 0); } + + force_open_hrmp_channel { + let sender_id: ParaId = 1u32.into(); + let recipient_id: ParaId = 2u32.into(); + + // make sure para is registered, and has enough balance. 
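// As context for this benchmark: a single Root-origin `force_open_hrmp_channel` call both files
// the open request and accepts it for the recipient, with the channel going live at the next
// session change. A minimal sketch in the style of the mock-runtime tests (para ids and limits
// are illustrative):
fn force_open_sketch() {
    let (para_a, para_b): (ParaId, ParaId) = (1u32.into(), 3u32.into());
    // One Root call replaces the usual request/accept handshake between the two paras.
    Hrmp::force_open_hrmp_channel(RuntimeOrigin::root(), para_a, para_b, 2, 8).unwrap();
    // The request is only queued here; the channel materialises after the next session change.
}
// (Benchmark setup continues below: both paras are registered with enough balance to cover the
// usual channel deposits.)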
+ let sender_deposit: BalanceOf = + Configuration::::config().hrmp_sender_deposit.unique_saturated_into(); + let recipient_deposit: BalanceOf = + Configuration::::config().hrmp_recipient_deposit.unique_saturated_into(); + register_parachain_with_balance::(sender_id, sender_deposit); + register_parachain_with_balance::(recipient_id, recipient_deposit); + + let capacity = Configuration::::config().hrmp_channel_max_capacity; + let message_size = Configuration::::config().hrmp_channel_max_message_size; + + // make sure this channel doesn't exist + let channel_id = HrmpChannelId { sender: sender_id, recipient: recipient_id }; + assert!(HrmpOpenChannelRequests::::get(&channel_id).is_none()); + assert!(HrmpChannels::::get(&channel_id).is_none()); + }: _(frame_system::Origin::::Root, sender_id, recipient_id, capacity, message_size) + verify { + assert_last_event::( + Event::::HrmpChannelForceOpened(sender_id, recipient_id, capacity, message_size).into() + ); + } } frame_benchmarking::impl_benchmark_test_suite!( diff --git a/runtime/parachains/src/hrmp/tests.rs b/runtime/parachains/src/hrmp/tests.rs index 22824601b521..9e0d0646d057 100644 --- a/runtime/parachains/src/hrmp/tests.rs +++ b/runtime/parachains/src/hrmp/tests.rs @@ -17,7 +17,7 @@ use super::*; use crate::mock::{ new_test_ext, Configuration, Hrmp, MockGenesisConfig, Paras, ParasShared, - RuntimeEvent as MockEvent, System, Test, + RuntimeEvent as MockEvent, RuntimeOrigin, System, Test, }; use frame_support::{assert_noop, assert_ok, traits::Currency as _}; use primitives::v2::BlockNumber; @@ -194,6 +194,34 @@ fn open_channel_works() { }); } +#[test] +fn force_open_channel_works() { + let para_a = 1.into(); + let para_b = 3.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and live parachains. + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![4, 5])); + Hrmp::force_open_hrmp_channel(RuntimeOrigin::root(), para_a, para_b, 2, 8).unwrap(); + Hrmp::assert_storage_consistency_exhaustive(); + assert!(System::events().iter().any(|record| record.event == + MockEvent::Hrmp(Event::HrmpChannelForceOpened(para_a, para_b, 2, 8)))); + + // Advance to a block 6, but without session change. That means that the channel has + // not been created yet. + run_to_block(6, None); + assert!(!channel_exists(para_a, para_b)); + Hrmp::assert_storage_consistency_exhaustive(); + + // Now let the session change happen and thus open the channel. + run_to_block(8, Some(vec![8])); + assert!(channel_exists(para_a, para_b)); + }); +} + #[test] fn close_channel_works() { let para_a = 5.into(); diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 2005861a6c4b..3d73a4049ed4 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -50,7 +50,8 @@ mod mock; pub use origin::{ensure_parachain, Origin}; pub use paras::ParaLifecycle; -use primitives::v2::Id as ParaId; +use primitives::v2::{HeadData, Id as ParaId, ValidationCode}; +use sp_runtime::DispatchResult; /// Schedule a para to be initialized at the start of the next session with the given genesis data. /// @@ -78,3 +79,20 @@ pub fn schedule_parathread_upgrade(id: ParaId) -> Result<(), ( pub fn schedule_parachain_downgrade(id: ParaId) -> Result<(), ()> { paras::Pallet::::schedule_parachain_downgrade(id).map_err(|_| ()) } + +/// Schedules a validation code upgrade to a parachain with the given id. 
+/// +/// This simply calls [`crate::paras::Pallet::schedule_code_upgrade_external`]. +pub fn schedule_code_upgrade( + id: ParaId, + new_code: ValidationCode, +) -> DispatchResult { + paras::Pallet::::schedule_code_upgrade_external(id, new_code) +} + +/// Sets the current parachain head with the given id. +/// +/// This simply calls [`crate::paras::Pallet::set_current_head`]. +pub fn set_current_head(id: ParaId, new_head: HeadData) { + paras::Pallet::::set_current_head(id, new_head) +} diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 184696ffcd45..273db30e2839 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -118,7 +118,7 @@ use primitives::v2::{ use scale_info::TypeInfo; use sp_core::RuntimeDebug; use sp_runtime::{ - traits::{AppVerify, One}, + traits::{AppVerify, One, Saturating}, DispatchResult, SaturatedConversion, }; use sp_std::{cmp, mem, prelude::*}; @@ -535,6 +535,8 @@ pub mod pallet { /// The PVF pre-checking statement cannot be included since the PVF pre-checking mechanism /// is disabled. PvfCheckDisabled, + /// Parachain cannot currently schedule a code upgrade. + CannotUpgradeCode, } /// All currently active PVF pre-checking votes. @@ -752,8 +754,7 @@ pub mod pallet { new_head: HeadData, ) -> DispatchResult { ensure_root(origin)?; - ::Heads::insert(¶, new_head); - Self::deposit_event(Event::CurrentHeadUpdated(para)); + Self::set_current_head(para, new_head); Ok(()) } @@ -1050,6 +1051,31 @@ const INVALID_TX_DOUBLE_VOTE: u8 = 3; const INVALID_TX_PVF_CHECK_DISABLED: u8 = 4; impl Pallet { + /// This is a call to schedule code upgrades for parachains which is safe to be called + /// outside of this module. That means this function does all checks necessary to ensure + /// that some external code is allowed to trigger a code upgrade. We do not do auth checks, + /// that should be handled by whomever calls this function. + pub(crate) fn schedule_code_upgrade_external( + id: ParaId, + new_code: ValidationCode, + ) -> DispatchResult { + // Check that we can schedule an upgrade at all. + ensure!(Self::can_upgrade_validation_code(id), Error::::CannotUpgradeCode); + let config = configuration::Pallet::::config(); + let current_block = frame_system::Pallet::::block_number(); + // Schedule the upgrade with a delay just like if a parachain triggered the upgrade. + let upgrade_block = current_block.saturating_add(config.validation_upgrade_delay); + Self::schedule_code_upgrade(id, new_code, upgrade_block, &config); + Self::deposit_event(Event::CodeUpgradeScheduled(id)); + Ok(()) + } + + /// Set the current head of a parachain. + pub(crate) fn set_current_head(para: ParaId, new_head: HeadData) { + ::Heads::insert(¶, new_head); + Self::deposit_event(Event::CurrentHeadUpdated(para)); + } + /// Called by the initializer to initialize the paras pallet. 
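// To make the scheduling delay above concrete: an externally scheduled upgrade activates
// `validation_upgrade_delay` blocks after the call, exactly like a para-signalled upgrade.
// A small worked example of the arithmetic in `schedule_code_upgrade_external`:
fn expected_upgrade_block(current_block: u32, validation_upgrade_delay: u32) -> u32 {
    // Mirrors `current_block.saturating_add(config.validation_upgrade_delay)`; e.g. a call at
    // block 100 with a delay of 10 schedules the new code to activate at block 110, and a
    // `CodeUpgradeScheduled` event is emitted right away.
    current_block.saturating_add(validation_upgrade_delay)
}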
pub(crate) fn initializer_initialize(now: T::BlockNumber) -> Weight { let weight = Self::prune_old_code(now); diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 8f6f2e4db47d..5a5df70e079a 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1571,6 +1571,7 @@ pub type Executive = frame_executive::Executive< pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" parachains_configuration::migration::v3::MigrateToV3, + pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, ), >; @@ -1594,6 +1595,7 @@ mod benches { [runtime_common::paras_registrar, Registrar] [runtime_parachains::configuration, Configuration] [runtime_parachains::disputes, ParasDisputes] + [runtime_parachains::hrmp, Hrmp] [runtime_parachains::initializer, Initializer] [runtime_parachains::paras, Paras] [runtime_parachains::paras_inherent, ParaInherent] @@ -1807,8 +1809,8 @@ sp_api::impl_runtime_apis! { } } - impl mmr::MmrApi for Runtime { - fn generate_proof(_leaf_index: u64) + impl mmr::MmrApi for Runtime { + fn generate_proof(_block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -1832,15 +1834,15 @@ sp_api::impl_runtime_apis! { Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_leaf_indices: Vec) + fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } fn generate_historical_batch_proof( - _leaf_indices: Vec, - _leaves_count: u64, + _block_numbers: Vec, + _best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/polkadot/src/weights/runtime_common_paras_registrar.rs b/runtime/polkadot/src/weights/runtime_common_paras_registrar.rs index 9e30a3aecaa8..89a1c628c503 100644 --- a/runtime/polkadot/src/weights/runtime_common_paras_registrar.rs +++ b/runtime/polkadot/src/weights/runtime_common_paras_registrar.rs @@ -105,4 +105,28 @@ impl runtime_common::paras_registrar::WeightInfo for We .saturating_add(T::DbWeight::get().reads(10 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } + // Storage: Paras FutureCodeHash (r:1 w:1) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Paras UpgradeCooldowns (r:1 w:1) + // Storage: Paras PvfActiveVoteMap (r:1 w:0) + // Storage: Paras CodeByHash (r:1 w:1) + // Storage: Paras UpcomingUpgrades (r:1 w:1) + // Storage: System Digest (r:1 w:1) + // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:0 w:1) + // Storage: Paras UpgradeRestrictionSignal (r:0 w:1) + fn schedule_code_upgrade(b: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } + // Storage: Paras Heads (r:0 w:1) + fn set_current_head(b: u32, ) -> Weight { + Weight::from_ref_time(5_494_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } } diff --git a/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs b/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs index 17d03bb637af..576b25ddd718 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs +++ 
b/runtime/polkadot/src/weights/runtime_parachains_hrmp.rs @@ -154,4 +154,19 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } + // Storage: Paras ParaLifecycles (r:2 w:0) + // Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) + // Storage: Hrmp HrmpChannels (r:1 w:0) + // Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) + // Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) + // Storage: Dmp DownwardMessageQueueHeads (r:2 w:2) + // Storage: Dmp DownwardMessageQueues (r:2 w:2) + // Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) + fn force_open_hrmp_channel() -> Weight { + Weight::from_ref_time(104_771_000 as u64) + .saturating_add(T::DbWeight::get().reads(13 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index ac247248b9ea..1763613ec6c2 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1690,11 +1690,11 @@ sp_api::impl_runtime_apis! { } } - impl mmr::MmrApi for Runtime { - fn generate_proof(leaf_index: u64) + impl mmr::MmrApi for Runtime { + fn generate_proof(block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { - Mmr::generate_batch_proof(vec![leaf_index]) + Mmr::generate_batch_proof(vec![block_number]) .and_then(|(leaves, proof)| Ok(( mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), mmr::BatchProof::into_single_leaf_proof(proof)? @@ -1725,18 +1725,18 @@ sp_api::impl_runtime_apis! { Ok(Mmr::mmr_root()) } - fn generate_batch_proof(leaf_indices: Vec) + fn generate_batch_proof(block_numbers: Vec) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_batch_proof(leaf_indices) + Mmr::generate_batch_proof(block_numbers) .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) } fn generate_historical_batch_proof( - leaf_indices: Vec, - leaves_count: mmr::LeafIndex, + block_numbers: Vec, + best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_historical_batch_proof(leaf_indices, leaves_count).map( + Mmr::generate_historical_batch_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( leaves diff --git a/runtime/rococo/src/weights/runtime_common_paras_registrar.rs b/runtime/rococo/src/weights/runtime_common_paras_registrar.rs index 9e4d3c4e7e54..5afe490ae1ff 100644 --- a/runtime/rococo/src/weights/runtime_common_paras_registrar.rs +++ b/runtime/rococo/src/weights/runtime_common_paras_registrar.rs @@ -105,4 +105,28 @@ impl runtime_common::paras_registrar::WeightInfo for We .saturating_add(T::DbWeight::get().reads(10 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } + // Storage: Paras FutureCodeHash (r:1 w:1) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Paras UpgradeCooldowns (r:1 w:1) + // Storage: Paras PvfActiveVoteMap (r:1 w:0) + // Storage: Paras CodeByHash (r:1 w:1) + // Storage: Paras UpcomingUpgrades (r:1 w:1) + // Storage: System Digest (r:1 w:1) + // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:0 w:1) + // Storage: Paras UpgradeRestrictionSignal (r:0 w:1) + fn schedule_code_upgrade(b: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 0 + 
.saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } + // Storage: Paras Heads (r:0 w:1) + fn set_current_head(b: u32, ) -> Weight { + Weight::from_ref_time(5_494_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } } diff --git a/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 41e000095add..04db94e8f76a 100644 --- a/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -160,4 +160,19 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } + // Storage: Paras ParaLifecycles (r:2 w:0) + // Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) + // Storage: Hrmp HrmpChannels (r:1 w:0) + // Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) + // Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) + // Storage: Dmp DownwardMessageQueueHeads (r:2 w:2) + // Storage: Dmp DownwardMessageQueues (r:2 w:2) + // Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) + fn force_open_hrmp_channel() -> Weight { + Weight::from_ref_time(104_771_000 as u64) + .saturating_add(T::DbWeight::get().reads(13 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } } diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index a43e13eb015e..8a9e128befa8 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -919,8 +919,8 @@ sp_api::impl_runtime_apis! { } } - impl mmr::MmrApi for Runtime { - fn generate_proof(_leaf_index: u64) + impl mmr::MmrApi for Runtime { + fn generate_proof(_block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -944,15 +944,15 @@ sp_api::impl_runtime_apis! { Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_leaf_indices: Vec) + fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } fn generate_historical_batch_proof( - _leaf_indices: Vec, - _leaves_count: u64, + _block_numbers: Vec, + _best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 454e367e2c15..b9ef6a34316b 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1222,6 +1222,7 @@ pub type Executive = frame_executive::Executive< pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" parachains_configuration::migration::v3::MigrateToV3, + pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, ), >; /// The payload being signed in transactions. @@ -1448,8 +1449,8 @@ sp_api::impl_runtime_apis! { } } - impl mmr::MmrApi for Runtime { - fn generate_proof(_leaf_index: u64) + impl mmr::MmrApi for Runtime { + fn generate_proof(_block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { @@ -1477,15 +1478,15 @@ sp_api::impl_runtime_apis! 
{ Err(mmr::Error::PalletNotIncluded) } - fn generate_batch_proof(_leaf_indices: Vec) + fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } fn generate_historical_batch_proof( - _leaf_indices: Vec, - _leaves_count: u64, + _block_numbers: Vec, + _best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/runtime/westend/src/weights/runtime_common_paras_registrar.rs b/runtime/westend/src/weights/runtime_common_paras_registrar.rs index 32cacfb57771..e52924381cd7 100644 --- a/runtime/westend/src/weights/runtime_common_paras_registrar.rs +++ b/runtime/westend/src/weights/runtime_common_paras_registrar.rs @@ -103,4 +103,28 @@ impl runtime_common::paras_registrar::WeightInfo for We .saturating_add(T::DbWeight::get().reads(10 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } + // Storage: Paras FutureCodeHash (r:1 w:1) + // Storage: Paras CurrentCodeHash (r:1 w:0) + // Storage: Paras UpgradeCooldowns (r:1 w:1) + // Storage: Paras PvfActiveVoteMap (r:1 w:0) + // Storage: Paras CodeByHash (r:1 w:1) + // Storage: Paras UpcomingUpgrades (r:1 w:1) + // Storage: System Digest (r:1 w:1) + // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras FutureCodeUpgrades (r:0 w:1) + // Storage: Paras UpgradeRestrictionSignal (r:0 w:1) + fn schedule_code_upgrade(b: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } + // Storage: Paras Heads (r:0 w:1) + fn set_current_head(b: u32, ) -> Weight { + Weight::from_ref_time(5_494_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } } diff --git a/runtime/westend/src/weights/runtime_parachains_hrmp.rs b/runtime/westend/src/weights/runtime_parachains_hrmp.rs index 67713cdf2f2d..5cb4754daa02 100644 --- a/runtime/westend/src/weights/runtime_parachains_hrmp.rs +++ b/runtime/westend/src/weights/runtime_parachains_hrmp.rs @@ -156,4 +156,19 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } + // Storage: Paras ParaLifecycles (r:2 w:0) + // Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) + // Storage: Hrmp HrmpChannels (r:1 w:0) + // Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) + // Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) + // Storage: Dmp DownwardMessageQueueHeads (r:2 w:2) + // Storage: Dmp DownwardMessageQueues (r:2 w:2) + // Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) + // Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) + fn force_open_hrmp_channel() -> Weight { + Weight::from_ref_time(104_771_000 as u64) + .saturating_add(T::DbWeight::get().reads(13 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) + } } diff --git a/scripts/ci/gitlab/lingua.dic b/scripts/ci/gitlab/lingua.dic index 3add6a276cf0..3a19233a8fb9 100644 --- a/scripts/ci/gitlab/lingua.dic +++ b/scripts/ci/gitlab/lingua.dic @@ -204,6 +204,7 @@ PoV/MS PoW/MS PR precheck +prechecking preconfigured preimage/MS preopen diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml 
b/scripts/ci/gitlab/pipeline/zombienet.yml index 8b8bc3abe0ae..5c6215aad2c4 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -88,7 +88,7 @@ zombienet-tests-parachains-disputes: - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh --github-remote-dir="${GH_DIR}" --test="0002-parachains-disputes.zndsl" - allow_failure: true + allow_failure: false retry: 2 tags: - zombienet-polkadot-integration-test diff --git a/tests/common.rs b/tests/common.rs index 325bffd125ad..3f040208972c 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use polkadot_core_primitives::Block; -use remote_externalities::rpc_api::RpcService; +use polkadot_core_primitives::{Block, Hash, Header}; use std::{ io::{BufRead, BufReader, Read}, process::{Child, ExitStatus}, thread, time::Duration, }; +use substrate_rpc_client::{ws_client, ChainApi}; use tokio::time::timeout; /// Wait for the given `child` the given amount of `secs`. @@ -56,12 +56,12 @@ async fn wait_n_finalized_blocks_from(n: usize, url: &str) { let mut interval = tokio::time::interval(Duration::from_secs(6)); loop { - let rpc_service = match RpcService::new(url, false).await { + let rpc = match ws_client(url).await { Ok(rpc_service) => rpc_service, Err(_) => continue, }; - if let Ok(block) = rpc_service.get_finalized_head::().await { + if let Ok(block) = ChainApi::<(), Hash, Header, Block>::finalized_head(&rpc).await { built_blocks.insert(block); if built_blocks.len() > n { break diff --git a/utils/generate-bags/Cargo.toml b/utils/generate-bags/Cargo.toml index 11d2d57c7213..2038dc94a1ea 100644 --- a/utils/generate-bags/Cargo.toml +++ b/utils/generate-bags/Cargo.toml @@ -5,7 +5,7 @@ authors.workspace = true edition.workspace = true [dependencies] -clap = { version = "3.1", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } generate-bags = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/utils/generate-bags/src/main.rs b/utils/generate-bags/src/main.rs index 9f5dbada3e93..409b28b5b79d 100644 --- a/utils/generate-bags/src/main.rs +++ b/utils/generate-bags/src/main.rs @@ -20,15 +20,15 @@ //! touched again. It can be reused to regenerate a wholly different //! quantity of bags, or if the existential deposit changes, etc. -use clap::{ArgEnum, Parser}; +use clap::{Parser, ValueEnum}; use generate_bags::generate_thresholds; use kusama_runtime::Runtime as KusamaRuntime; use polkadot_runtime::Runtime as PolkadotRuntime; use std::path::{Path, PathBuf}; use westend_runtime::Runtime as WestendRuntime; -#[derive(Clone, Debug, ArgEnum)] -#[clap(rename_all = "PascalCase")] +#[derive(Clone, Debug, ValueEnum)] +#[value(rename_all = "PascalCase")] enum Runtime { Westend, Kusama, @@ -50,22 +50,22 @@ impl Runtime { #[derive(Debug, Parser)] struct Opt { /// How many bags to generate. - #[clap(long, default_value = "200")] + #[arg(long, default_value_t = 200)] n_bags: usize, /// Which runtime to generate. - #[clap(long, ignore_case = true, arg_enum, default_value = "Polkadot")] + #[arg(long, ignore_case = true, value_enum, default_value_t = Runtime::Polkadot)] runtime: Runtime, /// Where to write the output. output: PathBuf, /// The total issuance of the native currency. 
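// The switch to clap 4 in this binary follows the same recipe used across the other utilities in
// this change: `ArgEnum` becomes `ValueEnum`, field-level `#[clap(...)]` becomes `#[arg(...)]`
// (or `#[command(...)]` for subcommands), and `parse(try_from_str)` is simply dropped in favour
// of typed defaults. A self-contained sketch of the new shape (names are illustrative):
use clap::{Parser, ValueEnum};

#[derive(Clone, Debug, ValueEnum)]
#[value(rename_all = "PascalCase")]
enum ExampleRuntime {
    Polkadot,
    Kusama,
    Westend,
}

#[derive(Debug, Parser)]
struct ExampleOpts {
    /// Enums are now tagged `value_enum`, with typed defaults via `default_value_t`.
    #[arg(long, ignore_case = true, value_enum, default_value_t = ExampleRuntime::Polkadot)]
    runtime: ExampleRuntime,
    /// Plain typed arguments no longer need `parse(try_from_str)`.
    #[arg(long, default_value_t = 200)]
    n_bags: usize,
}
// (The remaining fields of the real `Opt` struct below follow the same `#[arg(...)]` pattern.)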
- #[clap(short, long)] + #[arg(short, long)] total_issuance: u128, /// The minimum account balance (i.e. existential deposit) for the native currency. - #[clap(short, long)] + #[arg(short, long)] minimum_balance: u128, } diff --git a/utils/remote-ext-tests/bags-list/Cargo.toml b/utils/remote-ext-tests/bags-list/Cargo.toml index 187da8a48af3..b6390cd34bcd 100644 --- a/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/utils/remote-ext-tests/bags-list/Cargo.toml @@ -17,6 +17,6 @@ sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -clap = { version = "3.1", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } log = "0.4.17" tokio = { version = "1.19.2", features = ["macros"] } diff --git a/utils/remote-ext-tests/bags-list/src/main.rs b/utils/remote-ext-tests/bags-list/src/main.rs index 0fafa5e96f7a..53d81c4b4e05 100644 --- a/utils/remote-ext-tests/bags-list/src/main.rs +++ b/utils/remote-ext-tests/bags-list/src/main.rs @@ -16,18 +16,18 @@ //! Remote tests for bags-list pallet. -use clap::{ArgEnum, Parser}; +use clap::{Parser, ValueEnum}; -#[derive(Clone, Debug, ArgEnum)] -#[clap(rename_all = "PascalCase")] +#[derive(Clone, Debug, ValueEnum)] +#[value(rename_all = "PascalCase")] enum Command { CheckMigration, SanityCheck, Snapshot, } -#[derive(Clone, Debug, ArgEnum)] -#[clap(rename_all = "PascalCase")] +#[derive(Clone, Debug, ValueEnum)] +#[value(rename_all = "PascalCase")] enum Runtime { Polkadot, Kusama, @@ -36,13 +36,13 @@ enum Runtime { #[derive(Parser)] struct Cli { - #[clap(long, short, default_value = "wss://kusama-rpc.polkadot.io:443")] + #[arg(long, short, default_value = "wss://kusama-rpc.polkadot.io:443")] uri: String, - #[clap(long, short, ignore_case = true, arg_enum, default_value = "kusama")] + #[arg(long, short, ignore_case = true, value_enum, default_value_t = Runtime::Kusama)] runtime: Runtime, - #[clap(long, short, ignore_case = true, arg_enum, default_value = "SanityCheck")] + #[arg(long, short, ignore_case = true, value_enum, default_value_t = Command::SanityCheck)] command: Command, - #[clap(long, short)] + #[arg(long, short)] snapshot_limit: Option, } diff --git a/utils/staking-miner/Cargo.toml b/utils/staking-miner/Cargo.toml index 21946983e330..7c0745a7b3f2 100644 --- a/utils/staking-miner/Cargo.toml +++ b/utils/staking-miner/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -clap = { version = "3.1", features = ["derive", "env"] } +clap = { version = "4.0.9", features = ["derive", "env"] } tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } jsonrpsee = { version = "0.15.1", features = ["ws-client", "macros"] } log = "0.4.17" diff --git a/utils/staking-miner/src/main.rs b/utils/staking-miner/src/main.rs index b10f87950445..6e5b53423548 100644 --- a/utils/staking-miner/src/main.rs +++ b/utils/staking-miner/src/main.rs @@ -253,6 +253,7 @@ enum Error { AlreadySubmitted, VersionMismatch, StrategyNotSatisfied, + QueueFull, Other(String), } @@ -295,11 +296,16 @@ frame_support::parameter_types! { /// Build the Ext at hash with all the data of `ElectionProviderMultiPhase` and any additional /// pallets. 
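// Stepping back to the `tests/common.rs` helper earlier in this change: block polling now goes
// through `substrate-rpc-client` instead of `remote_externalities::rpc_api::RpcService`. A
// compact sketch of that pattern, assuming a reachable websocket endpoint (the function name is
// illustrative):
async fn latest_finalized(url: &str) -> Option<polkadot_core_primitives::Hash> {
    use polkadot_core_primitives::{Block, Hash, Header};
    use substrate_rpc_client::{ws_client, ChainApi};

    // Build a websocket RPC client...
    let rpc = ws_client(url).await.ok()?;
    // ...and ask the node for its finalized head; the generics pick the chain's concrete types.
    ChainApi::<(), Hash, Header, Block>::finalized_head(&rpc).await.ok()
}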
-async fn create_election_ext( +async fn create_election_ext( client: SharedRpcClient, at: Option, additional: Vec, -) -> Result> { +) -> Result> +where + T: EPM::Config, + B: BlockT, + B::Header: DeserializeOwned, +{ use frame_support::{storage::generator::StorageMap, traits::PalletInfo}; use sp_core::hashing::twox_128; @@ -420,6 +426,7 @@ fn mine_dpos(ext: &mut Ext) -> Result<(), Error> { pub(crate) async fn check_versions( rpc: &SharedRpcClient, + print: bool, ) -> Result<(), Error> { let linked_version = T::Version::get(); let on_chain_version = rpc @@ -427,10 +434,31 @@ pub(crate) async fn check_versions( .await .expect("runtime version RPC should always work; qed"); - log::debug!(target: LOG_TARGET, "linked version {:?}", linked_version); - log::debug!(target: LOG_TARGET, "on-chain version {:?}", on_chain_version); + let do_print = || { + log::info!( + target: LOG_TARGET, + "linked version {:?}", + (&linked_version.spec_name, &linked_version.spec_version) + ); + log::info!( + target: LOG_TARGET, + "on-chain version {:?}", + (&on_chain_version.spec_name, &on_chain_version.spec_version) + ); + }; - if linked_version != on_chain_version { + if print { + do_print(); + } + + // we relax the checking here a bit, which should not cause any issues in production (a chain + // that messes up its spec name is highly unlikely), but it allows us to do easier testing. + if linked_version.spec_name != on_chain_version.spec_name || + linked_version.spec_version != on_chain_version.spec_version + { + if !print { + do_print(); + } log::error!( target: LOG_TARGET, "VERSION MISMATCH: any transaction will fail with bad-proof" @@ -558,7 +586,7 @@ async fn main() { log::info!(target: LOG_TARGET, "connected to chain {:?}", chain); any_runtime_unit! { - check_versions::(&rpc).await + check_versions::(&rpc, true).await }; let outcome = any_runtime! { diff --git a/utils/staking-miner/src/monitor.rs b/utils/staking-miner/src/monitor.rs index 676440d018fd..0780ef881fce 100644 --- a/utils/staking-miner/src/monitor.rs +++ b/utils/staking-miner/src/monitor.rs @@ -64,7 +64,7 @@ where .map_err::, _>(Into::into)? .unwrap_or_default(); - for (_score, idx) in indices { + for (_score, _bn, idx) in indices { let key = StorageKey(EPM::SignedSubmissionsMap::::hashed_key_for(idx)); if let Some(submission) = rpc @@ -81,20 +81,36 @@ where Ok(()) } +/// `true` if `our_score` should pass the onchain `best_score` with the given strategy. +pub(crate) fn score_passes_strategy( + our_score: sp_npos_elections::ElectionScore, + best_score: sp_npos_elections::ElectionScore, + strategy: SubmissionStrategy, +) -> bool { + match strategy { + SubmissionStrategy::Always => true, + SubmissionStrategy::IfLeading => + our_score == best_score || + our_score.strict_threshold_better(best_score, Perbill::zero()), + SubmissionStrategy::ClaimBetterThan(epsilon) => + our_score.strict_threshold_better(best_score, epsilon), + SubmissionStrategy::ClaimNoWorseThan(epsilon) => + !best_score.strict_threshold_better(our_score, epsilon), + } +} + /// Reads all current solutions and checks the scores according to the `SubmissionStrategy`. -async fn ensure_no_better_solution( +async fn ensure_strategy_met( rpc: &SharedRpcClient, at: Hash, score: sp_npos_elections::ElectionScore, strategy: SubmissionStrategy, max_submissions: u32, ) -> Result<(), Error> { - let epsilon = match strategy { - // don't care about current scores. 
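// On the relaxed version check above: only `spec_name` and `spec_version` are compared now, so a
// node whose `impl_version` or `transaction_version` differs from the linked runtime no longer
// trips the mismatch error. The predicate, reduced to a sketch over `sp_version::RuntimeVersion`:
fn versions_compatible(linked: &sp_version::RuntimeVersion, on_chain: &sp_version::RuntimeVersion) -> bool {
    // Everything else in `RuntimeVersion` is deliberately ignored to make testing against
    // slightly divergent builds easier.
    linked.spec_name == on_chain.spec_name && linked.spec_version == on_chain.spec_version
}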
- SubmissionStrategy::Always => return Ok(()), - SubmissionStrategy::IfLeading => Perbill::zero(), - SubmissionStrategy::ClaimBetterThan(epsilon) => epsilon, - }; + // don't care about current scores. + if matches!(strategy, SubmissionStrategy::Always) { + return Ok(()) + } let indices_key = StorageKey(EPM::SignedSubmissionIndices::::hashed_key().to_vec()); @@ -104,34 +120,16 @@ async fn ensure_no_better_solution( .map_err::, _>(Into::into)? .unwrap_or_default(); - let mut is_best_score = true; - let mut scores = 0; - - log::debug!(target: LOG_TARGET, "submitted solutions on chain: {:?}", indices); - - // BTreeMap is ordered, take last to get the max score. - for (curr_max_score, _) in indices.into_iter() { - if !score.strict_threshold_better(curr_max_score, epsilon) { - log::warn!(target: LOG_TARGET, "mined score is not better; skipping to submit"); - is_best_score = false; - } - - if score == curr_max_score { - log::warn!( - target: LOG_TARGET, - "mined score has the same score as already submitted score" - ); - } - - // Indices can't be bigger than u32::MAX so can't overflow. - scores += 1; + // we check the queue here as well. Could be checked elsewhere. + if indices.len() as u32 >= max_submissions { + return Err(Error::::QueueFull) } - if scores == max_submissions { - log::warn!(target: LOG_TARGET, "The submissions queue is full"); - } + // default score is all zeros, any score is better than it. + let best_score = indices.last().map(|(score, _, _)| *score).unwrap_or_default(); + log::debug!(target: LOG_TARGET, "best onchain score is {:?}", best_score); - if is_best_score { + if score_passes_strategy(score, best_score, strategy) { Ok(()) } else { Err(Error::StrategyNotSatisfied) @@ -233,7 +231,7 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { // block on this because if this fails there is no way to recover from // that error i.e, upgrade/downgrade required. - if let Err(err) = crate::check_versions::(&rpc).await { + if let Err(err) = crate::check_versions::(&rpc, false).await { let _ = tx.send(err.into()); return; } @@ -314,9 +312,14 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { } }; - let ensure_no_better_fut = tokio::spawn(async move { - ensure_no_better_solution::(&rpc1, latest_head, score, config.submission_strategy, - SignedMaxSubmissions::get()).await + let ensure_strategy_met_fut = tokio::spawn(async move { + ensure_strategy_met::( + &rpc1, + latest_head, + score, + config.submission_strategy, + SignedMaxSubmissions::get() + ).await }); let ensure_signed_phase_fut = tokio::spawn(async move { @@ -325,7 +328,7 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { // Run the calls in parallel and return once all has completed or any failed. if let Err(err) = tokio::try_join!( - flatten(ensure_no_better_fut), + flatten(ensure_strategy_met_fut), flatten(ensure_signed_phase_fut), ) { log::debug!(target: LOG_TARGET, "Skipping to submit at block {}; {}", at.number, err); @@ -370,7 +373,7 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { TransactionStatus::Ready | TransactionStatus::Broadcast(_) | TransactionStatus::Future => continue, - TransactionStatus::InBlock(hash) => { + TransactionStatus::InBlock((hash, _)) => { log::info!(target: LOG_TARGET, "included at {:?}", hash); let key = StorageKey( frame_support::storage::storage_prefix(b"System", b"Events").to_vec(), @@ -399,7 +402,7 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! 
{ TransactionStatus::Retracted(hash) => { log::info!(target: LOG_TARGET, "Retracted at {:?}", hash); }, - TransactionStatus::Finalized(hash) => { + TransactionStatus::Finalized((hash, _)) => { log::info!(target: LOG_TARGET, "Finalized at {:?}", hash); break }, @@ -420,3 +423,46 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { monitor_cmd_for!(polkadot); monitor_cmd_for!(kusama); monitor_cmd_for!(westend); + +#[cfg(test)] +pub mod tests { + use super::*; + + #[test] + fn score_passes_strategy_works() { + let s = |x| sp_npos_elections::ElectionScore { minimal_stake: x, ..Default::default() }; + let two = Perbill::from_percent(2); + + // anything passes Always + assert!(score_passes_strategy(s(0), s(0), SubmissionStrategy::Always)); + assert!(score_passes_strategy(s(5), s(0), SubmissionStrategy::Always)); + assert!(score_passes_strategy(s(5), s(10), SubmissionStrategy::Always)); + + // if leading + assert!(score_passes_strategy(s(0), s(0), SubmissionStrategy::IfLeading)); + assert!(score_passes_strategy(s(1), s(0), SubmissionStrategy::IfLeading)); + assert!(score_passes_strategy(s(2), s(0), SubmissionStrategy::IfLeading)); + assert!(!score_passes_strategy(s(5), s(10), SubmissionStrategy::IfLeading)); + assert!(!score_passes_strategy(s(9), s(10), SubmissionStrategy::IfLeading)); + assert!(score_passes_strategy(s(10), s(10), SubmissionStrategy::IfLeading)); + + // if better by 2% + assert!(!score_passes_strategy(s(50), s(100), SubmissionStrategy::ClaimBetterThan(two))); + assert!(!score_passes_strategy(s(100), s(100), SubmissionStrategy::ClaimBetterThan(two))); + assert!(!score_passes_strategy(s(101), s(100), SubmissionStrategy::ClaimBetterThan(two))); + assert!(!score_passes_strategy(s(102), s(100), SubmissionStrategy::ClaimBetterThan(two))); + assert!(score_passes_strategy(s(103), s(100), SubmissionStrategy::ClaimBetterThan(two))); + assert!(score_passes_strategy(s(150), s(100), SubmissionStrategy::ClaimBetterThan(two))); + + // if no less than 2% worse + assert!(!score_passes_strategy(s(50), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(!score_passes_strategy(s(97), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(98), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(99), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(100), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(101), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(102), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(103), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + assert!(score_passes_strategy(s(150), s(100), SubmissionStrategy::ClaimNoWorseThan(two))); + } +} diff --git a/utils/staking-miner/src/opts.rs b/utils/staking-miner/src/opts.rs index f43744ad45e8..d7690a21205b 100644 --- a/utils/staking-miner/src/opts.rs +++ b/utils/staking-miner/src/opts.rs @@ -21,21 +21,21 @@ use std::str::FromStr; #[derive(Debug, Clone, Parser)] #[cfg_attr(test, derive(PartialEq))] -#[clap(author, version, about)] +#[command(author, version, about)] pub(crate) struct Opt { /// The `ws` node to connect to. - #[clap(long, short, default_value = DEFAULT_URI, env = "URI", global = true)] + #[arg(long, short, default_value = DEFAULT_URI, env = "URI", global = true)] pub uri: String, /// WS connection timeout in number of seconds. 
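// With the transaction-pool API bump reflected above, `TransactionStatus::InBlock` and
// `TransactionStatus::Finalized` now carry a `(block_hash, index)` pair instead of a bare hash.
// A trimmed sketch of how the watcher loop destructures them (the `status` value and the
// surrounding subscription loop are the ones already used by this monitor):
match status {
    TransactionStatus::InBlock((hash, _tx_index)) => {
        log::info!(target: LOG_TARGET, "included at {:?}", hash);
    },
    TransactionStatus::Finalized((hash, _tx_index)) => {
        log::info!(target: LOG_TARGET, "finalized at {:?}", hash);
    },
    // Ready, Broadcast, Future, Retracted and the failure variants are handled as before.
    _ => {},
}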
- #[clap(long, parse(try_from_str), default_value_t = 60)] + #[arg(long, default_value_t = 60)] pub connection_timeout: usize, /// WS request timeout in number of seconds. - #[clap(long, parse(try_from_str), default_value_t = 60 * 10)] + #[arg(long, default_value_t = 60 * 10)] pub request_timeout: usize, - #[clap(subcommand)] + #[command(subcommand)] pub command: Command, } @@ -65,7 +65,7 @@ pub(crate) struct MonitorConfig { /// /// WARNING: Don't use an account with a large stash for this. Based on how the bot is /// configured, it might re-try and lose funds through transaction fees/deposits. - #[clap(long, short, env = "SEED")] + #[arg(long, short, env = "SEED")] pub seed_or_path: String, /// The type of event to listen to. @@ -73,11 +73,11 @@ pub(crate) struct MonitorConfig { /// Typically, finalized is safer and there is no chance of anything going wrong, but it can be /// slower. It is recommended to use finalized, if the duration of the signed phase is longer /// than the finality delay. - #[clap(long, default_value = "head", possible_values = &["head", "finalized"])] + #[arg(long, default_value = "head", value_parser = ["head", "finalized"])] pub listen: String, /// The solver algorithm to use. - #[clap(subcommand)] + #[command(subcommand)] pub solver: Solver, /// Submission strategy to use. @@ -89,7 +89,9 @@ pub(crate) struct MonitorConfig { /// /// `--submission-strategy always`: always submit. /// /// `--submission-strategy "percent-better "`: submit if the submission is `n` percent better. - #[clap(long, parse(try_from_str), default_value = "if-leading")] + /// + /// `--submission-strategy "no-worse-than "`: submit if submission is no more than `n` percent worse. + #[clap(long, default_value = "if-leading")] pub submission_strategy: SubmissionStrategy, /// Delay in number of seconds to wait until starting mining a solution. @@ -100,7 +102,7 @@ pub(crate) struct MonitorConfig { /// /// When this is enabled and there are competing solutions, your solution might not be submitted /// if the scores are equal. - #[clap(long, parse(try_from_str), default_value_t = 0)] + #[arg(long, default_value_t = 0)] pub delay: usize, } @@ -114,19 +116,19 @@ pub(crate) struct DryRunConfig { /// /// WARNING: Don't use an account with a large stash for this. Based on how the bot is /// configured, it might re-try and lose funds through transaction fees/deposits. - #[clap(long, short, env = "SEED")] + #[arg(long, short, env = "SEED")] pub seed_or_path: String, /// The block hash at which scraping happens. If none is provided, the latest head is used. - #[clap(long)] + #[arg(long)] pub at: Option, /// The solver algorithm to use. - #[clap(subcommand)] + #[command(subcommand)] pub solver: Solver, /// Force create a new snapshot, else expect one to exist onchain. - #[clap(long)] + #[arg(long)] pub force_snapshot: bool, } @@ -134,11 +136,11 @@ pub(crate) struct DryRunConfig { #[cfg_attr(test, derive(PartialEq))] pub(crate) struct EmergencySolutionConfig { /// The block hash at which scraping happens. If none is provided, the latest head is used. - #[clap(long)] + #[arg(long)] pub at: Option, /// The solver algorithm to use. - #[clap(subcommand)] + #[command(subcommand)] pub solver: Solver, /// The number of top backed winners to take. All are taken, if not provided.
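// The accepted `--submission-strategy` strings map onto `SubmissionStrategy` as follows (a rough
// summary of the `FromStr` impl further down; both percentage forms take a whitespace-separated
// integer):
//   "always"            -> SubmissionStrategy::Always
//   "if-leading"        -> SubmissionStrategy::IfLeading
//   "percent-better 4"  -> SubmissionStrategy::ClaimBetterThan(Perbill::from_percent(4))
//   "no-worse-than 2"   -> SubmissionStrategy::ClaimNoWorseThan(Perbill::from_percent(2))
#[test]
fn submission_strategy_parsing_sketch() {
    assert_eq!(
        "no-worse-than 2".parse::<SubmissionStrategy>().ok(),
        Some(SubmissionStrategy::ClaimNoWorseThan(Perbill::from_percent(2)))
    );
}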
@@ -149,7 +151,7 @@ pub(crate) struct EmergencySolutionConfig { #[cfg_attr(test, derive(PartialEq))] pub(crate) struct InfoOpts { /// Serialize the output as json - #[clap(long, short)] + #[arg(long, short)] pub json: bool, } @@ -157,12 +159,14 @@ pub(crate) struct InfoOpts { #[derive(Debug, Copy, Clone)] #[cfg_attr(test, derive(PartialEq))] pub enum SubmissionStrategy { - // Only submit if at the time, we are the best. - IfLeading, - // Always submit. + /// Always submit. Always, - // Submit if we are leading, or if the solution that's leading is more that the given `Perbill` - // better than us. This helps detect obviously fake solutions and still combat them. + /// Only submit if at the time, we are the best (or equal to it). + IfLeading, + /// Submit if we are no worse than `Perbill` worse than the best. + ClaimNoWorseThan(Perbill), + /// Submit if we are leading, or if the solution that's leading is more that the given `Perbill` + /// better than us. This helps detect obviously fake solutions and still combat them. ClaimBetterThan(Perbill), } @@ -170,11 +174,11 @@ pub enum SubmissionStrategy { #[cfg_attr(test, derive(PartialEq))] pub(crate) enum Solver { SeqPhragmen { - #[clap(long, default_value = "10")] + #[arg(long, default_value_t = 10)] iterations: usize, }, PhragMMS { - #[clap(long, default_value = "10")] + #[arg(long, default_value_t = 10)] iterations: usize, }, } @@ -185,6 +189,7 @@ pub(crate) enum Solver { /// * --submission-strategy if-leading: only submit if leading /// * --submission-strategy always: always submit /// * --submission-strategy "percent-better ": submit if submission is `n` percent better. +/// * --submission-strategy "no-worse-than": submit if submission is no more than `n` percent worse. /// impl FromStr for SubmissionStrategy { type Err = String; @@ -196,8 +201,11 @@ impl FromStr for SubmissionStrategy { Self::IfLeading } else if s == "always" { Self::Always - } else if s.starts_with("percent-better ") { - let percent: u32 = s[15..].parse().map_err(|e| format!("{:?}", e))?; + } else if let Some(percent) = s.strip_prefix("no-worse-than ") { + let percent: u32 = percent.parse().map_err(|e| format!("{:?}", e))?; + Self::ClaimNoWorseThan(Perbill::from_percent(percent)) + } else if let Some(percent) = s.strip_prefix("percent-better ") { + let percent: u32 = percent.parse().map_err(|e| format!("{:?}", e))?; Self::ClaimBetterThan(Perbill::from_percent(percent)) } else { return Err(s.into()) diff --git a/xcm/xcm-executor/integration-tests/src/lib.rs b/xcm/xcm-executor/integration-tests/src/lib.rs index 272ceadfea01..821987531aa0 100644 --- a/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/xcm/xcm-executor/integration-tests/src/lib.rs @@ -23,7 +23,7 @@ use polkadot_test_client::{ }; use polkadot_test_runtime::pallet_test_notifier; use polkadot_test_service::construct_extrinsic; -use sp_runtime::{generic::BlockId, traits::Block}; +use sp_runtime::traits::Block; use sp_state_machine::InspectState; use xcm::{latest::prelude::*, VersionedResponse, VersionedXcm}; @@ -60,17 +60,14 @@ fn basic_buy_fees_message_executes() { futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) .expect("imports the block"); - client - .state_at(&BlockId::Hash(block_hash)) - .expect("state should exist") - .inspect_state(|| { - assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( - r.event, - polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted( - Outcome::Complete(_) - )), - ))); - }); + 
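// The test-client updates here track the substrate API change where `state_at` takes a block
// hash directly rather than a `BlockId`, so state inspection collapses into one chained call.
// A minimal sketch of the idiom (assuming a `client` and `block_hash` like the ones in this test):
client.state_at(&block_hash).expect("state should exist").inspect_state(|| {
    // Externalities for `block_hash` are installed inside this closure, so storage reads such as
    // `System::events()` observe that block's post-import state.
    let _events = polkadot_test_runtime::System::events();
});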
client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( + r.event, + polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted( + Outcome::Complete(_) + )), + ))); + }); } #[test] @@ -104,17 +101,14 @@ fn query_response_fires() { .expect("imports the block"); let mut query_id = None; - client - .state_at(&BlockId::Hash(block_hash)) - .expect("state should exist") - .inspect_state(|| { - for r in polkadot_test_runtime::System::events().iter() { - match r.event { - TestNotifier(QueryPrepared(q)) => query_id = Some(q), - _ => (), - } + client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + for r in polkadot_test_runtime::System::events().iter() { + match r.event { + TestNotifier(QueryPrepared(q)) => query_id = Some(q), + _ => (), } - }); + } + }); let query_id = query_id.unwrap(); let mut block_builder = client.init_polkadot_block_builder(); @@ -142,25 +136,22 @@ fn query_response_fires() { futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) .expect("imports the block"); - client - .state_at(&BlockId::Hash(block_hash)) - .expect("state should exist") - .inspect_state(|| { - assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( - r.event, - polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::ResponseReady( - q, - Response::ExecutionResult(None), - )) if q == query_id, - ))); - assert_eq!( - polkadot_test_runtime::Xcm::query(query_id), - Some(QueryStatus::Ready { - response: VersionedResponse::V2(Response::ExecutionResult(None)), - at: 2u32.into() - }), - ) - }); + client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( + r.event, + polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::ResponseReady( + q, + Response::ExecutionResult(None), + )) if q == query_id, + ))); + assert_eq!( + polkadot_test_runtime::Xcm::query(query_id), + Some(QueryStatus::Ready { + response: VersionedResponse::V2(Response::ExecutionResult(None)), + at: 2u32.into() + }), + ) + }); } #[test] @@ -193,17 +184,14 @@ fn query_response_elicits_handler() { .expect("imports the block"); let mut query_id = None; - client - .state_at(&BlockId::Hash(block_hash)) - .expect("state should exist") - .inspect_state(|| { - for r in polkadot_test_runtime::System::events().iter() { - match r.event { - TestNotifier(NotifyQueryPrepared(q)) => query_id = Some(q), - _ => (), - } + client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + for r in polkadot_test_runtime::System::events().iter() { + match r.event { + TestNotifier(NotifyQueryPrepared(q)) => query_id = Some(q), + _ => (), } - }); + } + }); let query_id = query_id.unwrap(); let mut block_builder = client.init_polkadot_block_builder(); @@ -230,17 +218,14 @@ fn query_response_elicits_handler() { futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) .expect("imports the block"); - client - .state_at(&BlockId::Hash(block_hash)) - .expect("state should exist") - .inspect_state(|| { - assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( - r.event, - TestNotifier(ResponseReceived( - MultiLocation { parents: 0, interior: X1(Junction::AccountId32 { .. 
}) }, - q, - Response::ExecutionResult(None), - )) if q == query_id, - ))); - }); + client.state_at(&block_hash).expect("state should exist").inspect_state(|| { + assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( + r.event, + TestNotifier(ResponseReceived( + MultiLocation { parents: 0, interior: X1(Junction::AccountId32 { .. }) }, + q, + Response::ExecutionResult(None), + )) if q == query_id, + ))); + }); } diff --git a/xcm/xcm-simulator/fuzzer/Cargo.toml b/xcm/xcm-simulator/fuzzer/Cargo.toml index e90ed3e913e3..5a24c34f2b30 100644 --- a/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -honggfuzz = "0.5.54" +honggfuzz = "0.5.55" scale-info = { version = "2.1.2", features = ["derive"] } frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/xcm/xcm-simulator/fuzzer/src/fuzz.rs b/xcm/xcm-simulator/fuzzer/src/fuzz.rs index 691fba10cc10..70ad3c1e2cc6 100644 --- a/xcm/xcm-simulator/fuzzer/src/fuzz.rs +++ b/xcm/xcm-simulator/fuzzer/src/fuzz.rs @@ -122,7 +122,7 @@ fn main() { #[cfg(fuzzing)] { loop { - fuzz!(|data: &[u8]| { + honggfuzz::fuzz!(|data: &[u8]| { run_one_input(data); }); } diff --git a/zombienet_tests/functional/0001-parachains-pvf.zndsl b/zombienet_tests/functional/0001-parachains-pvf.zndsl index 07bc356464fe..1f187498d78f 100644 --- a/zombienet_tests/functional/0001-parachains-pvf.zndsl +++ b/zombienet_tests/functional/0001-parachains-pvf.zndsl @@ -64,14 +64,14 @@ one: reports histogram polkadot_pvf_preparation_time has at least 1 samples in b two: reports histogram polkadot_pvf_preparation_time has at least 1 samples in buckets ["0.1", "0.5", "1", "2", "3", "10"] within 10 seconds # Check all buckets >= 20. 
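// On the fuzzer tweak above: the harness now names the macro by its crate path. For reference, a
// minimal standalone honggfuzz harness has this shape (the `run_one_input` body is a stand-in
// for the simulator's real handler):
fn run_one_input(data: &[u8]) {
    // Hand the raw bytes to whatever the target under test expects.
    let _ = data.len();
}

fn main() {
    loop {
        honggfuzz::fuzz!(|data: &[u8]| {
            run_one_input(data);
        });
    }
}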
-alice: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -bob: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -charlie: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -dave: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -ferdie: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -eve: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -one: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds -two: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "+Inf"] within 10 seconds +alice: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +bob: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +charlie: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +dave: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +ferdie: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +eve: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +one: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds +two: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "180", "+Inf"] within 10 seconds # Check execution time. # There are two different timeout conditions: BACKING_EXECUTION_TIMEOUT(2s) and diff --git a/zombienet_tests/misc/0001-paritydb.zndsl b/zombienet_tests/misc/0001-paritydb.zndsl index 2ed448321f78..7340fffb477e 100644 --- a/zombienet_tests/misc/0001-paritydb.zndsl +++ b/zombienet_tests/misc/0001-paritydb.zndsl @@ -30,7 +30,7 @@ validator-8: reports node_roles is 4 validator-9: reports node_roles is 4 # Ensure parachains are registered. -validator-0: parachain 2000 is registered +validator-0: parachain 2000 is registered within 20 seconds validator-0: parachain 2001 is registered validator-0: parachain 2002 is registered validator-0: parachain 2003 is registered
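// Finally, on the MmrApi changes repeated across the runtimes above: the API is now keyed by
// block number rather than raw MMR leaf index, and historical batch proofs are anchored at a
// best-known block number instead of a leaf count. In trait form the new surface looks roughly
// like this (a simplified sketch using the `sp_mmr_primitives` types, not a verbatim copy):
pub trait MmrApiSketch<Hash, BlockNumber> {
    // Was `generate_proof(leaf_index: u64)`.
    fn generate_proof(
        block_number: BlockNumber,
    ) -> Result<(sp_mmr_primitives::EncodableOpaqueLeaf, sp_mmr_primitives::Proof<Hash>), sp_mmr_primitives::Error>;

    // Was `generate_batch_proof(leaf_indices: Vec<u64>)`.
    fn generate_batch_proof(
        block_numbers: Vec<BlockNumber>,
    ) -> Result<(Vec<sp_mmr_primitives::EncodableOpaqueLeaf>, sp_mmr_primitives::BatchProof<Hash>), sp_mmr_primitives::Error>;

    // Was `generate_historical_batch_proof(leaf_indices: Vec<u64>, leaves_count: u64)`.
    fn generate_historical_batch_proof(
        block_numbers: Vec<BlockNumber>,
        best_known_block_number: BlockNumber,
    ) -> Result<(Vec<sp_mmr_primitives::EncodableOpaqueLeaf>, sp_mmr_primitives::BatchProof<Hash>), sp_mmr_primitives::Error>;
}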