diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000000..66b28b3485d8 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,32 @@ +# +# An auto defined `clippy` feature was introduced, +# but it was found to clash with user defined features, +# so was renamed to `cargo-clippy`. +# +# If you want standard clippy run: +# RUSTFLAGS= cargo clippy +[target.'cfg(feature = "cargo-clippy")'] +rustflags = [ + "-Aclippy::all", + "-Dclippy::correctness", + "-Aclippy::if-same-then-else", + "-Aclippy::clone-double-ref", + "-Dclippy::complexity", + "-Aclippy::zero-prefixed-literal", # 00_1000_000 + "-Aclippy::type_complexity", # raison d'etre + "-Aclippy::nonminimal-bool", # maybe + "-Aclippy::borrowed-box", # Reasonable to fix this one + "-Aclippy::too-many-arguments", # (Turning this on would lead to) + "-Aclippy::unnecessary_cast", # Types may change + "-Aclippy::identity-op", # One case where we do 0 + + "-Aclippy::useless_conversion", # Types may change + "-Aclippy::unit_arg", # styalistic. + "-Aclippy::option-map-unit-fn", # styalistic + "-Aclippy::bind_instead_of_map", # styalistic + "-Aclippy::erasing_op", # E.g. 0 * DOLLARS + "-Aclippy::eq_op", # In tests we test equality. + "-Aclippy::while_immutable_condition", # false positives + "-Aclippy::needless_option_as_deref", # false positives + "-Aclippy::derivable_impls", # false positives + "-Aclippy::stable_sort_primitive", # prefer stable sort +] diff --git a/.gitignore b/.gitignore index 5ea0458ddfc8..f9ab33eb63f3 100644 --- a/.gitignore +++ b/.gitignore @@ -10,5 +10,4 @@ polkadot.* !polkadot.service !.rpm/* .DS_Store -.cargo .env diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a8f70394cd5c..57a76ad2c857 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,6 +33,7 @@ variables: GIT_DEPTH: 100 CI_SERVER_NAME: "GitLab CI" CI_IMAGE: "paritytech/ci-linux:production" + BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27" DOCKER_OS: "debian:stretch" ARCH: "x86_64" ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.2.78" @@ -40,6 +41,13 @@ variables: default: cache: {} + retry: + max: 2 + when: + - runner_system_failure + - unknown_failure + - api_failure + interruptible: true .collect-artifacts: artifacts: @@ -72,25 +80,12 @@ default: dotenv: pipeline-stopper.env .kubernetes-env: - retry: - max: 2 - when: - - runner_system_failure - - unknown_failure - - api_failure - interruptible: true + image: "${CI_IMAGE}" tags: - kubernetes-parity-build .docker-env: image: "${CI_IMAGE}" - retry: - max: 2 - when: - - runner_system_failure - - unknown_failure - - api_failure - interruptible: true tags: - linux-docker @@ -150,9 +145,6 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 .build-push-image: - extends: - - .kubernetes-env - image: quay.io/buildah/stable:v1.27 before_script: - test -s ./artifacts/VERSION || exit 1 - test -s ./artifacts/EXTRATAG || exit 1 @@ -196,8 +188,6 @@ include: # zombienet jobs - scripts/ci/gitlab/pipeline/zombienet.yml - - #### stage: .post deploy-parity-testnet: diff --git a/Cargo.lock b/Cargo.lock index f8c46862ef44..20321502cf97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -426,7 +426,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "async-trait", @@ -463,7 +463,7 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -483,7 +483,7 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "beefy-primitives", "sp-api", @@ -493,7 +493,7 @@ dependencies = [ [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -1027,6 +1027,16 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cpu-time" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e393a7668fe1fad3075085b86c781883000b4ede868f43627b34a87c8b7ded" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "cpufeatures" version = "0.2.1" @@ -2013,7 +2023,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", ] @@ -2037,7 +2047,7 @@ checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -2060,7 +2070,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "Inflector", "array-bytes", @@ -2112,7 +2122,7 @@ dependencies = [ [[package]] name = 
"frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2123,7 +2133,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2139,7 +2149,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -2168,7 +2178,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "bitflags", "frame-metadata", @@ -2200,7 +2210,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "Inflector", "cfg-expr", @@ -2214,7 +2224,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2226,7 +2236,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro2", "quote", @@ -2236,7 +2246,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-support-test-pallet", @@ -2259,7 +2269,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -2270,7 +2280,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "log", @@ -2288,7 +2298,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -2303,7 +2313,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "sp-api", @@ -2312,7 +2322,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "parity-scale-codec", @@ -2483,7 +2493,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "chrono", "frame-election-provider-support", @@ -3209,7 +3219,7 @@ checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" [[package]] name = "kusama-runtime" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-primitives", "bitvec", @@ -3313,7 +3323,7 @@ dependencies = [ [[package]] name = "kusama-runtime-constants" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "polkadot-primitives", @@ -4027,15 +4037,6 @@ dependencies = [ "parity-util-mem", ] -[[package]] -name = "memory-lru" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95ae042940bad7e312857b929ee3d11b8f799a80cb7b9c7ec5125516906395" -dependencies = [ - "lru", -] - [[package]] name = "memory_units" version = "0.4.0" @@ -4110,6 +4111,42 @@ dependencies = [ "winapi", ] +[[package]] +name = "mmr-gadget" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" +dependencies = [ + "beefy-primitives", + "futures", + "log", + "parity-scale-codec", + "sc-client-api", + "sc-offchain", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-io", + "sp-mmr-primitives", + "sp-runtime", +] + +[[package]] +name = "mmr-rpc" +version = "4.0.0-dev" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" +dependencies = [ + "anyhow", + "jsonrpsee", + "parity-scale-codec", + "serde", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-mmr-primitives", + "sp-runtime", +] + [[package]] name = "mockall" version = "0.11.2" @@ -4611,7 +4648,7 @@ checksum = "20448fd678ec04e6ea15bbe0476874af65e98a01515d667aa49f1434dc44ebf4" [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4625,7 +4662,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -4641,7 +4678,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -4656,7 +4693,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4680,7 +4717,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4700,7 +4737,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-election-provider-support", "frame-support", @@ -4719,7 +4756,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4734,7 +4771,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "beefy-primitives", "frame-support", @@ -4750,7 +4787,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "beefy-merkle-tree", @@ -4773,7 +4810,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = 
"git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4791,7 +4828,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4810,7 +4847,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4827,7 +4864,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "assert_matches", "frame-benchmarking", @@ -4844,7 +4881,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4862,7 +4899,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4886,7 +4923,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4899,7 +4936,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4917,7 +4954,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4935,7 +4972,7 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = 
"git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4950,7 +4987,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -4973,7 +5010,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4989,7 +5026,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5009,7 +5046,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5026,7 +5063,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5043,9 +5080,8 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ - "ckb-merkle-mountain-range", "frame-benchmarking", "frame-support", "frame-system", @@ -5058,26 +5094,10 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-mmr-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" -dependencies = [ - "anyhow", - "jsonrpsee", - "parity-scale-codec", - "serde", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-mmr-primitives", - "sp-runtime", -] - [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5093,7 +5113,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", 
@@ -5110,7 +5130,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5130,7 +5150,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "sp-api", @@ -5140,7 +5160,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -5157,7 +5177,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5180,7 +5200,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5197,7 +5217,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5212,7 +5232,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5230,7 +5250,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5245,7 +5265,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5263,7 +5283,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5279,7 +5299,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -5300,7 +5320,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5316,7 +5336,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -5330,7 +5350,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5353,7 +5373,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5364,7 +5384,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "log", "sp-arithmetic", @@ -5373,7 +5393,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5390,7 +5410,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -5404,7 +5424,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = 
"git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5422,7 +5442,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5441,7 +5461,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-support", "frame-system", @@ -5457,7 +5477,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5473,7 +5493,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5485,7 +5505,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5502,7 +5522,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5518,7 +5538,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5533,7 +5553,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-benchmarking", "frame-support", @@ -5547,7 +5567,7 @@ dependencies = [ [[package]] name = "pallet-xcm" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "frame-system", @@ -5569,7 +5589,7 @@ dependencies = [ [[package]] name = "pallet-xcm-benchmarks" -version = "0.9.31" +version = "0.9.33" dependencies = [ 
"frame-benchmarking", "frame-support", @@ -5884,7 +5904,7 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "polkadot" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_cmd", "color-eyre", @@ -5899,7 +5919,7 @@ dependencies = [ [[package]] name = "polkadot-approval-distribution" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "env_logger 0.9.0", @@ -5923,7 +5943,7 @@ dependencies = [ [[package]] name = "polkadot-availability-bitfield-distribution" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "bitvec", @@ -5948,7 +5968,7 @@ dependencies = [ [[package]] name = "polkadot-availability-distribution" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "derive_more", @@ -5977,7 +5997,7 @@ dependencies = [ [[package]] name = "polkadot-availability-recovery" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "env_logger 0.9.0", @@ -6006,7 +6026,7 @@ dependencies = [ [[package]] name = "polkadot-cli" -version = "0.9.31" +version = "0.9.33" dependencies = [ "clap", "frame-benchmarking-cli", @@ -6032,7 +6052,7 @@ dependencies = [ [[package]] name = "polkadot-client" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-primitives", "frame-benchmarking", @@ -6074,7 +6094,7 @@ dependencies = [ [[package]] name = "polkadot-collator-protocol" -version = "0.9.31" +version = "0.9.33" dependencies = [ "always-assert", "assert_matches", @@ -6103,7 +6123,7 @@ dependencies = [ [[package]] name = "polkadot-core-primitives" -version = "0.9.31" +version = "0.9.33" dependencies = [ "parity-scale-codec", "parity-util-mem", @@ -6115,7 +6135,7 @@ dependencies = [ [[package]] name = "polkadot-dispute-distribution" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -6147,7 +6167,7 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" -version = "0.9.31" +version = "0.9.33" dependencies = [ "parity-scale-codec", "polkadot-node-primitives", @@ -6160,7 +6180,7 @@ dependencies = [ [[package]] name = "polkadot-gossip-support" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -6187,7 +6207,7 @@ dependencies = [ [[package]] name = "polkadot-network-bridge" -version = "0.9.31" +version = "0.9.33" dependencies = [ "always-assert", "assert_matches", @@ -6216,7 +6236,7 @@ dependencies = [ [[package]] name = "polkadot-node-collation-generation" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures", "parity-scale-codec", @@ -6235,7 +6255,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-approval-voting" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -6274,7 +6294,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-av-store" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "bitvec", @@ -6302,7 +6322,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-backing" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "bitvec", @@ -6328,7 +6348,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-bitfield-signing" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures", "polkadot-node-subsystem", @@ -6344,7 +6364,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-candidate-validation" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -6367,7 +6387,7 @@ dependencies = 
[ [[package]] name = "polkadot-node-core-chain-api" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures", "maplit", @@ -6386,7 +6406,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-chain-selection" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "futures", @@ -6407,7 +6427,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "fatality", @@ -6434,7 +6454,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-parachains-inherent" -version = "0.9.31" +version = "0.9.33" dependencies = [ "async-trait", "futures", @@ -6450,7 +6470,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-provisioner" -version = "0.9.31" +version = "0.9.33" dependencies = [ "bitvec", "fatality", @@ -6471,12 +6491,13 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf" -version = "0.9.31" +version = "0.9.33" dependencies = [ "always-assert", "assert_matches", "async-process", "async-std", + "cpu-time", "futures", "futures-timer", "hex-literal", @@ -6505,7 +6526,7 @@ dependencies = [ [[package]] name = "polkadot-node-core-pvf-checker" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures", "futures-timer", @@ -6528,11 +6549,10 @@ dependencies = [ [[package]] name = "polkadot-node-core-runtime-api" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures", - "memory-lru", - "parity-util-mem", + "lru", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6550,7 +6570,7 @@ dependencies = [ [[package]] name = "polkadot-node-jaeger" -version = "0.9.31" +version = "0.9.33" dependencies = [ "async-std", "lazy_static", @@ -6567,7 +6587,7 @@ dependencies = [ [[package]] name = "polkadot-node-metrics" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_cmd", "bs58", @@ -6595,7 +6615,7 @@ dependencies = [ [[package]] name = "polkadot-node-network-protocol" -version = "0.9.31" +version = "0.9.33" dependencies = [ "async-trait", "derive_more", @@ -6618,7 +6638,7 @@ dependencies = [ [[package]] name = "polkadot-node-primitives" -version = "0.9.31" +version = "0.9.33" dependencies = [ "bounded-vec", "futures", @@ -6640,7 +6660,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem" -version = "0.9.31" +version = "0.9.33" dependencies = [ "polkadot-node-jaeger", "polkadot-node-subsystem-types", @@ -6649,7 +6669,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-test-helpers" -version = "0.9.31" +version = "0.9.33" dependencies = [ "async-trait", "futures", @@ -6667,7 +6687,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-types" -version = "0.9.31" +version = "0.9.33" dependencies = [ "async-trait", "derive_more", @@ -6689,7 +6709,7 @@ dependencies = [ [[package]] name = "polkadot-node-subsystem-util" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -6730,7 +6750,7 @@ dependencies = [ [[package]] name = "polkadot-overseer" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -6756,7 +6776,7 @@ dependencies = [ [[package]] name = "polkadot-parachain" -version = "0.9.31" +version = "0.9.33" dependencies = [ "derive_more", "frame-support", @@ -6772,7 +6792,7 @@ dependencies = [ [[package]] name = "polkadot-performance-test" -version = "0.9.31" +version = "0.9.33" dependencies = [ "env_logger 0.9.0", "kusama-runtime", @@ -6786,7 +6806,7 @@ dependencies = [ 
[[package]] name = "polkadot-primitives" -version = "0.9.31" +version = "0.9.33" dependencies = [ "bitvec", "hex-literal", @@ -6812,7 +6832,7 @@ dependencies = [ [[package]] name = "polkadot-primitives-test-helpers" -version = "0.9.31" +version = "0.9.33" dependencies = [ "polkadot-primitives", "rand 0.8.5", @@ -6824,12 +6844,12 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-gadget", "beefy-gadget-rpc", "jsonrpsee", - "pallet-mmr-rpc", + "mmr-rpc", "pallet-transaction-payment-rpc", "polkadot-primitives", "sc-chain-spec", @@ -6855,7 +6875,7 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-primitives", "bitvec", @@ -6951,7 +6971,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-common" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-primitives", "bitvec", @@ -7003,7 +7023,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-constants" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "polkadot-primitives", @@ -7014,7 +7034,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-metrics" -version = "0.9.31" +version = "0.9.33" dependencies = [ "bs58", "parity-scale-codec", @@ -7025,7 +7045,7 @@ dependencies = [ [[package]] name = "polkadot-runtime-parachains" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "bitflags", @@ -7077,7 +7097,7 @@ dependencies = [ [[package]] name = "polkadot-service" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -7094,6 +7114,7 @@ dependencies = [ "kvdb-rocksdb", "log", "lru", + "mmr-gadget", "pallet-babe", "pallet-im-online", "pallet-staking", @@ -7171,6 +7192,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keystore", + "sp-mmr-primitives", "sp-offchain", "sp-runtime", "sp-session", @@ -7189,7 +7211,7 @@ dependencies = [ [[package]] name = "polkadot-statement-distribution" -version = "0.9.31" +version = "0.9.33" dependencies = [ "arrayvec 0.5.2", "assert_matches", @@ -7220,7 +7242,7 @@ dependencies = [ [[package]] name = "polkadot-statement-table" -version = "0.9.31" +version = "0.9.33" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -7229,7 +7251,7 @@ dependencies = [ [[package]] name = "polkadot-test-client" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures", "parity-scale-codec", @@ -7255,7 +7277,7 @@ dependencies = [ [[package]] name = "polkadot-test-malus" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "async-trait", @@ -7284,7 +7306,7 @@ dependencies = [ [[package]] name = "polkadot-test-runtime" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-primitives", "bitvec", @@ -7349,7 +7371,7 @@ dependencies = [ [[package]] name = "polkadot-test-service" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-benchmarking", "frame-system", @@ -7404,7 +7426,7 @@ dependencies = [ [[package]] name = "polkadot-voter-bags" -version = "0.9.31" +version = "0.9.33" dependencies = [ "clap", "generate-bags", @@ -7987,7 +8009,7 @@ checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remote-ext-tests-bags-list" -version = "0.9.31" +version = "0.9.33" dependencies = [ "clap", "frame-system", @@ -8007,7 +8029,7 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "env_logger 0.9.0", "log", @@ -8115,7 +8137,7 @@ dependencies = [ [[package]] name = "rococo-runtime" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-merkle-tree", "beefy-primitives", @@ -8204,7 +8226,7 @@ dependencies = [ [[package]] name = "rococo-runtime-constants" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "polkadot-primitives", @@ -8347,7 +8369,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "log", "sp-core", @@ -8358,7 +8380,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -8385,7 +8407,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "futures-timer", @@ -8408,7 +8430,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -8424,7 +8446,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "impl-trait-for-tuples", "memmap2", @@ -8441,7 +8463,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8452,7 +8474,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "chrono", @@ -8492,7 +8514,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "fnv", "futures", @@ -8520,7 
+8542,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "hash-db", "kvdb", @@ -8545,7 +8567,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -8569,7 +8591,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "fork-tree", @@ -8610,7 +8632,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "jsonrpsee", @@ -8632,7 +8654,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "fork-tree", "parity-scale-codec", @@ -8645,7 +8667,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -8669,7 +8691,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "lazy_static", "lru", @@ -8695,7 +8717,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "environmental", "parity-scale-codec", @@ -8711,7 +8733,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "log", "parity-scale-codec", @@ -8726,7 +8748,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = 
"git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "cfg-if", "libc", @@ -8746,7 +8768,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "ahash", "array-bytes", @@ -8787,7 +8809,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "finality-grandpa", "futures", @@ -8808,7 +8830,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "ansi_term", "futures", @@ -8825,7 +8847,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "async-trait", @@ -8840,7 +8862,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "async-trait", @@ -8887,7 +8909,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "cid", "futures", @@ -8907,7 +8929,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "bitflags", @@ -8933,7 +8955,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "ahash", "futures", @@ -8951,7 +8973,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "futures", @@ -8972,7 +8994,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "async-trait", @@ -9003,7 +9025,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "futures", @@ -9022,7 +9044,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "bytes", @@ -9052,7 +9074,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "libp2p", @@ -9065,7 +9087,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9074,7 +9096,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "hash-db", @@ -9104,7 +9126,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "jsonrpsee", @@ -9127,7 +9149,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "jsonrpsee", @@ -9140,7 +9162,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "hex", @@ -9159,7 +9181,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "directories", @@ -9230,7 +9252,7 @@ dependencies = [ 
[[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "log", "parity-scale-codec", @@ -9244,7 +9266,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -9263,7 +9285,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "libc", @@ -9282,7 +9304,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "chrono", "futures", @@ -9300,7 +9322,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "ansi_term", "atty", @@ -9331,7 +9353,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9342,7 +9364,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -9369,7 +9391,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -9383,7 +9405,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "futures-timer", @@ -9788,7 +9810,7 @@ checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" [[package]] name = "slot-range-helper" -version = "0.9.31" +version = "0.9.33" dependencies = [ "enumn", "parity-scale-codec", @@ -9864,7 +9886,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "hash-db", "log", @@ -9882,7 +9904,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "blake2", "proc-macro-crate", @@ -9894,7 +9916,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -9907,7 +9929,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "integer-sqrt", "num-traits", @@ -9922,7 +9944,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -9935,7 +9957,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "parity-scale-codec", @@ -9947,7 +9969,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "sp-api", @@ -9959,7 +9981,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "log", @@ -9977,7 +9999,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -9996,7 +10018,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", 
"merlin", @@ -10019,7 +10041,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -10033,7 +10055,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -10046,7 +10068,7 @@ dependencies = [ [[package]] name = "sp-core" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "base58", @@ -10091,7 +10113,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "blake2", "byteorder", @@ -10105,7 +10127,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro2", "quote", @@ -10116,7 +10138,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10125,7 +10147,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro2", "quote", @@ -10135,7 +10157,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "environmental", "parity-scale-codec", @@ -10146,7 +10168,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "finality-grandpa", "log", @@ -10164,7 +10186,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = 
"git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -10178,9 +10200,10 @@ dependencies = [ [[package]] name = "sp-io" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "bytes", + "ed25519-dalek", "futures", "hash-db", "libsecp256k1", @@ -10204,7 +10227,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "lazy_static", "sp-core", @@ -10215,7 +10238,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures", @@ -10232,7 +10255,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "thiserror", "zstd", @@ -10241,8 +10264,9 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ + "ckb-merkle-mountain-range", "log", "parity-scale-codec", "scale-info", @@ -10258,7 +10282,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -10272,7 +10296,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "sp-api", "sp-core", @@ -10282,7 +10306,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "backtrace", "lazy_static", @@ -10292,7 +10316,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "rustc-hash", "serde", @@ -10302,7 +10326,7 @@ dependencies = [ [[package]] name = 
"sp-runtime" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "either", "hash256-std-hasher", @@ -10325,7 +10349,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -10343,7 +10367,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "Inflector", "proc-macro-crate", @@ -10355,7 +10379,7 @@ dependencies = [ [[package]] name = "sp-sandbox" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "log", "parity-scale-codec", @@ -10369,7 +10393,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -10383,7 +10407,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "scale-info", @@ -10394,7 +10418,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "hash-db", "log", @@ -10416,12 +10440,12 @@ dependencies = [ [[package]] name = "sp-std" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" [[package]] name = "sp-storage" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10434,7 +10458,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "futures-timer", @@ -10450,7 +10474,7 @@ 
dependencies = [ [[package]] name = "sp-tracing" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "sp-std", @@ -10462,7 +10486,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "sp-api", "sp-runtime", @@ -10471,7 +10495,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "log", @@ -10487,7 +10511,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "ahash", "hash-db", @@ -10510,7 +10534,7 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "impl-serde", "parity-scale-codec", @@ -10527,7 +10551,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -10538,7 +10562,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "impl-trait-for-tuples", "log", @@ -10551,7 +10575,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10603,7 +10627,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "staking-miner" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_cmd", "clap", @@ -10766,7 +10790,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "platforms", ] @@ -10774,7 +10798,7 @@ dependencies = [ [[package]] 
name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -10795,7 +10819,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures-util", "hyper", @@ -10808,7 +10832,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "async-trait", "jsonrpsee", @@ -10821,7 +10845,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "jsonrpsee", "log", @@ -10842,7 +10866,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "array-bytes", "async-trait", @@ -10868,7 +10892,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "futures", "substrate-test-utils-derive", @@ -10878,7 +10902,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10889,7 +10913,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "ansi_term", "build-helper", @@ -11028,7 +11052,7 @@ checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" [[package]] name = "test-parachain-adder" -version = "0.9.31" +version = "0.9.33" dependencies = [ "dlmalloc", "parity-scale-codec", @@ -11041,7 +11065,7 @@ dependencies = [ [[package]] name = "test-parachain-adder-collator" -version = "0.9.31" +version = "0.9.33" dependencies = [ "clap", "futures", @@ -11067,14 +11091,14 @@ dependencies = [ [[package]] name = "test-parachain-halt" -version = "0.9.31" +version = "0.9.33" dependencies = [ 
"substrate-wasm-builder", ] [[package]] name = "test-parachain-undying" -version = "0.9.31" +version = "0.9.33" dependencies = [ "dlmalloc", "log", @@ -11088,7 +11112,7 @@ dependencies = [ [[package]] name = "test-parachain-undying-collator" -version = "0.9.31" +version = "0.9.33" dependencies = [ "clap", "futures", @@ -11114,7 +11138,7 @@ dependencies = [ [[package]] name = "test-parachains" -version = "0.9.31" +version = "0.9.33" dependencies = [ "parity-scale-codec", "sp-core", @@ -11125,7 +11149,7 @@ dependencies = [ [[package]] name = "test-runtime-constants" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "polkadot-primitives", @@ -11440,7 +11464,7 @@ dependencies = [ [[package]] name = "tracing-gum" -version = "0.9.31" +version = "0.9.33" dependencies = [ "polkadot-node-jaeger", "polkadot-primitives", @@ -11450,7 +11474,7 @@ dependencies = [ [[package]] name = "tracing-gum-proc-macro" -version = "0.9.31" +version = "0.9.33" dependencies = [ "assert_matches", "expander 0.0.6", @@ -11596,7 +11620,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#6cb4b6799de6f784f4c42eb01a76a8fa67039a67" +source = "git+https://github.com/paritytech/substrate?branch=master#5c8aa7eebaceaf37c8aaa58c980c1f445fbb1ebf" dependencies = [ "clap", "frame-try-runtime", @@ -12268,7 +12292,7 @@ dependencies = [ [[package]] name = "westend-runtime" -version = "0.9.31" +version = "0.9.33" dependencies = [ "beefy-primitives", "bitvec", @@ -12364,7 +12388,7 @@ dependencies = [ [[package]] name = "westend-runtime-constants" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "polkadot-primitives", @@ -12590,7 +12614,7 @@ dependencies = [ [[package]] name = "xcm" -version = "0.9.31" +version = "0.9.33" dependencies = [ "derivative", "impl-trait-for-tuples", @@ -12603,7 +12627,7 @@ dependencies = [ [[package]] name = "xcm-builder" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "frame-system", @@ -12626,7 +12650,7 @@ dependencies = [ [[package]] name = "xcm-executor" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-benchmarking", "frame-support", @@ -12643,7 +12667,7 @@ dependencies = [ [[package]] name = "xcm-executor-integration-tests" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "frame-system", @@ -12663,7 +12687,7 @@ dependencies = [ [[package]] name = "xcm-procedural" -version = "0.9.31" +version = "0.9.33" dependencies = [ "Inflector", "proc-macro2", @@ -12673,7 +12697,7 @@ dependencies = [ [[package]] name = "xcm-simulator" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "parity-scale-codec", @@ -12689,7 +12713,7 @@ dependencies = [ [[package]] name = "xcm-simulator-example" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "frame-system", @@ -12712,7 +12736,7 @@ dependencies = [ [[package]] name = "xcm-simulator-fuzzer" -version = "0.9.31" +version = "0.9.33" dependencies = [ "frame-support", "frame-system", @@ -12771,7 +12795,7 @@ dependencies = [ [[package]] name = "zombienet-backchannel" -version = "0.9.31" +version = "0.9.33" dependencies = [ "futures-util", "lazy_static", diff --git a/Cargo.toml b/Cargo.toml index 4b0e2047bf64..8beb3e81568f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ path = "src/main.rs" name = "polkadot" description = "Implementation of a 
`https://polkadot.network` node in Rust based on the Substrate framework." license = "GPL-3.0-only" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" rust-version = "1.57.0" # custom profiles diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 13739d396b24..c4705d4a3537 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-cli" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Polkadot Relay-chain Client Node" edition = "2021" diff --git a/cli/src/command.rs b/cli/src/command.rs index 5ce7c05162c1..0995e1d265d4 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -591,27 +591,27 @@ pub fn run() -> Result<()> { #[cfg(feature = "kusama-native")] if chain_spec.is_kusama() { - return Ok(runner.sync_run(|config| { + return runner.sync_run(|config| { cmd.run::(config) .map_err(|e| Error::SubstrateCli(e)) - })?) + }) } #[cfg(feature = "westend-native")] if chain_spec.is_westend() { - return Ok(runner.sync_run(|config| { + return runner.sync_run(|config| { cmd.run::(config) .map_err(|e| Error::SubstrateCli(e)) - })?) + }) } // else we assume it is polkadot. #[cfg(feature = "polkadot-native")] { - return Ok(runner.sync_run(|config| { + return runner.sync_run(|config| { cmd.run::(config) .map_err(|e| Error::SubstrateCli(e)) - })?) + }) } #[cfg(not(feature = "polkadot-native"))] diff --git a/core-primitives/Cargo.toml b/core-primitives/Cargo.toml index 2c6fe070522d..67d764cccc9b 100644 --- a/core-primitives/Cargo.toml +++ b/core-primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-core-primitives" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/erasure-coding/Cargo.toml b/erasure-coding/Cargo.toml index 0a6c4002a1c5..4c8cbbe6c33e 100644 --- a/erasure-coding/Cargo.toml +++ b/erasure-coding/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-erasure-coding" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/erasure-coding/fuzzer/Cargo.toml b/erasure-coding/fuzzer/Cargo.toml index 6c8ad88e3955..8e7710417e59 100644 --- a/erasure-coding/fuzzer/Cargo.toml +++ b/erasure-coding/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "erasure_coding_fuzzer" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs index 5e85809f4117..6abd7dce4dd3 100644 --- a/erasure-coding/src/lib.rs +++ b/erasure-coding/src/lib.rs @@ -216,7 +216,7 @@ pub struct Branches<'a, I> { impl<'a, I: AsRef<[u8]>> Branches<'a, I> { /// Get the trie root. 
pub fn root(&self) -> H256 { - self.root.clone() + self.root } } diff --git a/node/client/Cargo.toml b/node/client/Cargo.toml index 4889ecb6ecef..305ace46eb08 100644 --- a/node/client/Cargo.toml +++ b/node/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-client" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/client/src/benchmarking.rs b/node/client/src/benchmarking.rs index 7990bc88d218..aaa60a168b4d 100644 --- a/node/client/src/benchmarking.rs +++ b/node/client/src/benchmarking.rs @@ -165,7 +165,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), @@ -220,7 +220,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), @@ -274,7 +274,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), @@ -328,7 +328,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index 46eaf83b4498..b5152945ae7f 100644 --- a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-collation-generation" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index bf580c3bf348..4f5b457e5400 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-approval-voting" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index b513c18895b3..82a9a8c89bf5 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -282,8 +282,8 @@ impl State { /// Constructs an infinite iterator from an array of `TrancheEntry` values. Any missing tranches /// are filled with empty assignments, as they are needed to compute the approved tranches. 
-fn filled_tranche_iterator<'a>( - tranches: &'a [TrancheEntry], +fn filled_tranche_iterator( + tranches: &[TrancheEntry], ) -> impl Iterator { let mut gap_end = None; diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index fea71d79c098..520a1a745056 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -155,10 +155,10 @@ impl<'a> From<&'a SessionInfo> for Config { Config { assignment_keys: s.assignment_keys.clone(), validator_groups: s.validator_groups.clone(), - n_cores: s.n_cores.clone(), - zeroth_delay_tranche_width: s.zeroth_delay_tranche_width.clone(), - relay_vrf_modulo_samples: s.relay_vrf_modulo_samples.clone(), - n_delay_tranches: s.n_delay_tranches.clone(), + n_cores: s.n_cores, + zeroth_delay_tranche_width: s.zeroth_delay_tranche_width, + relay_vrf_modulo_samples: s.relay_vrf_modulo_samples, + n_delay_tranches: s.n_delay_tranches, } } } diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index 20629dd022d4..2331b50b6bb1 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -415,11 +415,8 @@ pub(crate) async fn handle_new_head( Err(error) => { // It's possible that we've lost a race with finality. let (tx, rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::FinalizedBlockHash( - block_header.number.clone(), - tx, - )) - .await; + ctx.send_message(ChainApiMessage::FinalizedBlockHash(block_header.number, tx)) + .await; let lost_to_finality = match rx.await { Ok(Ok(Some(h))) if h != block_hash => true, diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index bc63549795c2..06a4f0b24bb0 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -621,10 +621,7 @@ impl CurrentlyCheckingSet { .candidate_hash_map .remove(&approval_state.candidate_hash) .unwrap_or_default(); - approvals_cache.put( - approval_state.candidate_hash.clone(), - approval_state.approval_outcome.clone(), - ); + approvals_cache.put(approval_state.candidate_hash, approval_state.approval_outcome); return (out, approval_state) } } @@ -768,7 +765,7 @@ async fn run( where B: Backend, { - if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config.clone()) { + if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config) { gum::warn!(target: LOG_TARGET, ?err, "Could not run approval vote DB sanity check"); } @@ -1278,7 +1275,7 @@ async fn get_approval_signatures_for_candidate( Some(e) => e, }; - let relay_hashes = entry.block_assignments.iter().map(|(relay_hash, _)| relay_hash); + let relay_hashes = entry.block_assignments.keys(); let mut candidate_indices = HashSet::new(); // Retrieve `CoreIndices`/`CandidateIndices` as required by approval-distribution: @@ -2502,7 +2499,7 @@ async fn issue_approval( }; let candidate_hash = match block_entry.candidate(candidate_index as usize) { - Some((_, h)) => h.clone(), + Some((_, h)) => *h, None => { gum::warn!( target: LOG_TARGET, diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index a4a39df77668..6e221b08ef27 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-av-store" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 4fbbf3740ab0..cbbbf2bbd7dc 
100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -61,7 +61,7 @@ const PRUNE_BY_TIME_PREFIX: &[u8; 13] = b"prune_by_time"; // We have some keys we want to map to empty values because existence of the key is enough. We use this because // rocksdb doesn't support empty values. -const TOMBSTONE_VALUE: &[u8] = &*b" "; +const TOMBSTONE_VALUE: &[u8] = b" "; /// Unavailable blocks are kept for 1 hour. const KEEP_UNAVAILABLE_FOR: Duration = Duration::from_secs(60 * 60); diff --git a/node/core/av-store/src/metrics.rs b/node/core/av-store/src/metrics.rs index c50932c6173e..fedeb2b7d0e5 100644 --- a/node/core/av-store/src/metrics.rs +++ b/node/core/av-store/src/metrics.rs @@ -140,10 +140,16 @@ impl metrics::Metrics for Metrics { registry, )?, get_chunk: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_av_store_get_chunk", - "Time spent fetching requested chunks.`", - ))?, + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_av_store_get_chunk", + "Time spent fetching requested chunks.`", + ) + .buckets(vec![ + 0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, + 0.5, 1.0, 2.5, 5.0, 10.0, + ]), + )?, registry, )?, }; diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index bf52d54167fc..386db79f8a37 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-backing" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a9ae518e3103..2f8aa4490f27 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -482,9 +482,7 @@ impl TableContextTrait for TableContext { } fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups - .get(group) - .map_or(false, |g| g.iter().position(|a| a == authority).is_some()) + self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority)) } fn requisite_votes(&self, group: &ParaId) -> usize { @@ -499,7 +497,7 @@ struct InvalidErasureRoot; fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { let statement = match s.payload() { Statement::Seconded(c) => TableStatement::Seconded(c.clone()), - Statement::Valid(h) => TableStatement::Valid(h.clone()), + Statement::Valid(h) => TableStatement::Valid(*h), }; TableSignedStatement { diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml index 54aa27766b7b..6f4cb9909ce6 100644 --- a/node/core/bitfield-signing/Cargo.toml +++ b/node/core/bitfield-signing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-bitfield-signing" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/bitfield-signing/src/metrics.rs b/node/core/bitfield-signing/src/metrics.rs index ab4e73be0eeb..571a0c335bd7 100644 --- a/node/core/bitfield-signing/src/metrics.rs +++ b/node/core/bitfield-signing/src/metrics.rs @@ -50,10 +50,16 @@ impl metrics::Metrics for Metrics { registry, )?, run: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_bitfield_signing_run", - "Time spent within `bitfield_signing::run`", - ))?, + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_bitfield_signing_run", + "Time spent within 
`bitfield_signing::run`", + ) + .buckets(vec![ + 0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25, + 0.5, 1.0, 2.5, 5.0, 10.0, + ]), + )?, registry, )?, }; diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index 105d7c1a21dc..d1ea999cd66d 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-candidate-validation" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 7d9db4f3d794..74610bc113ec 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -502,7 +502,7 @@ async fn validate_candidate_exhaustive( let _timer = metrics.time_validate_candidate_exhaustive(); let validation_code_hash = validation_code.hash(); - let para_id = candidate_receipt.descriptor.para_id.clone(); + let para_id = candidate_receipt.descriptor.para_id; gum::debug!( target: LOG_TARGET, ?validation_code_hash, @@ -513,7 +513,7 @@ async fn validate_candidate_exhaustive( if let Err(e) = perform_basic_checks( &candidate_receipt.descriptor, persisted_validation_data.max_pov_size, - &*pov, + &pov, &validation_code_hash, ) { gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (basic checks)"); @@ -638,7 +638,7 @@ trait ValidationBackend { } } - async fn precheck_pvf(&mut self, pvf: Pvf) -> Result<(), PrepareError>; + async fn precheck_pvf(&mut self, pvf: Pvf) -> Result; } #[async_trait] @@ -664,7 +664,7 @@ impl ValidationBackend for ValidationHost { .map_err(|_| ValidationError::InternalError("validation was cancelled".into()))? 
} - async fn precheck_pvf(&mut self, pvf: Pvf) -> Result<(), PrepareError> { + async fn precheck_pvf(&mut self, pvf: Pvf) -> Result { let (tx, rx) = oneshot::channel(); if let Err(_) = self.precheck_pvf(pvf, tx).await { return Err(PrepareError::DidNotMakeIt) diff --git a/node/core/candidate-validation/src/tests.rs b/node/core/candidate-validation/src/tests.rs index cf467cd5c057..5ac93bc7d1f4 100644 --- a/node/core/candidate-validation/src/tests.rs +++ b/node/core/candidate-validation/src/tests.rs @@ -377,7 +377,7 @@ impl ValidationBackend for MockValidateCandidateBackend { result } - async fn precheck_pvf(&mut self, _pvf: Pvf) -> Result<(), PrepareError> { + async fn precheck_pvf(&mut self, _pvf: Pvf) -> Result { unreachable!() } } @@ -894,11 +894,11 @@ fn pov_decompression_failure_is_invalid() { } struct MockPreCheckBackend { - result: Result<(), PrepareError>, + result: Result, } impl MockPreCheckBackend { - fn with_hardcoded_result(result: Result<(), PrepareError>) -> Self { + fn with_hardcoded_result(result: Result) -> Self { Self { result } } } @@ -914,7 +914,7 @@ impl ValidationBackend for MockPreCheckBackend { unreachable!() } - async fn precheck_pvf(&mut self, _pvf: Pvf) -> Result<(), PrepareError> { + async fn precheck_pvf(&mut self, _pvf: Pvf) -> Result { self.result.clone() } } @@ -931,7 +931,7 @@ fn precheck_works() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(())), + MockPreCheckBackend::with_hardcoded_result(Ok(Duration::default())), relay_parent, validation_code_hash, ) @@ -977,7 +977,7 @@ fn precheck_invalid_pvf_blob_compression() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(())), + MockPreCheckBackend::with_hardcoded_result(Ok(Duration::default())), relay_parent, validation_code_hash, ) diff --git a/node/core/chain-api/Cargo.toml b/node/core/chain-api/Cargo.toml index 19cff83c9fdf..cf1c800c1a7f 100644 --- a/node/core/chain-api/Cargo.toml +++ b/node/core/chain-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-chain-api" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index 1e505a4df98d..90f764c903a9 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "polkadot-node-core-chain-selection" description = "Chain Selection Subsystem" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index e5ffe6811d6e..786454fb9891 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -381,6 +381,7 @@ async fn run( ) where B: Backend, { + #![allow(clippy::all)] loop { let res = run_until_error( &mut ctx, diff --git a/node/core/dispute-coordinator/Cargo.toml b/node/core/dispute-coordinator/Cargo.toml index 19eff42d4ad3..7088a7817f3e 100644 --- a/node/core/dispute-coordinator/Cargo.toml +++ b/node/core/dispute-coordinator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-dispute-coordinator" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index bb1456a59745..ab571108af37 100644 --- 
a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -32,7 +32,7 @@ use crate::{ backend::{Backend, BackendWriteOp, OverlayedBackend}, error::{FatalError, FatalResult}, metrics::Metrics, - DISPUTE_WINDOW, LOG_TARGET, + LOG_TARGET, }; const RECENT_DISPUTES_KEY: &[u8; 15] = b"recent-disputes"; @@ -318,25 +318,24 @@ pub(crate) fn load_recent_disputes( /// /// If one or more ancient sessions are pruned, all metadata on candidates within the ancient /// session will be deleted. -pub(crate) fn note_current_session( +pub(crate) fn note_earliest_session( overlay_db: &mut OverlayedBackend<'_, impl Backend>, - current_session: SessionIndex, + new_earliest_session: SessionIndex, ) -> SubsystemResult<()> { - let new_earliest = current_session.saturating_sub(DISPUTE_WINDOW.get()); match overlay_db.load_earliest_session()? { None => { // First launch - write new-earliest. - overlay_db.write_earliest_session(new_earliest); + overlay_db.write_earliest_session(new_earliest_session); }, - Some(prev_earliest) if new_earliest > prev_earliest => { + Some(prev_earliest) if new_earliest_session > prev_earliest => { // Prune all data in the outdated sessions. - overlay_db.write_earliest_session(new_earliest); + overlay_db.write_earliest_session(new_earliest_session); // Clear recent disputes metadata. { let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); - let lower_bound = (new_earliest, CandidateHash(Hash::repeat_byte(0x00))); + let lower_bound = (new_earliest_session, CandidateHash(Hash::repeat_byte(0x00))); let new_recent_disputes = recent_disputes.split_off(&lower_bound); // Any remanining disputes are considered ancient and must be pruned. @@ -373,6 +372,7 @@ mod tests { use super::*; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; + use polkadot_node_primitives::DISPUTE_WINDOW; use polkadot_primitives::v2::{Hash, Id as ParaId}; fn make_db() -> DbBackend { @@ -422,7 +422,7 @@ mod tests { let mut overlay_db = OverlayedBackend::new(&backend); gum::trace!(target: LOG_TARGET, ?current_session, "Noting current session"); - note_current_session(&mut overlay_db, current_session).unwrap(); + note_earliest_session(&mut overlay_db, earliest_session).unwrap(); let write_ops = overlay_db.into_write_ops(); backend.write(write_ops).unwrap(); @@ -442,7 +442,7 @@ mod tests { let current_session = current_session + 1; let earliest_session = earliest_session + 1; - note_current_session(&mut overlay_db, current_session).unwrap(); + note_earliest_session(&mut overlay_db, earliest_session).unwrap(); let write_ops = overlay_db.into_write_ops(); backend.write(write_ops).unwrap(); @@ -599,7 +599,7 @@ mod tests { } #[test] - fn note_current_session_prunes_old() { + fn note_earliest_session_prunes_old() { let mut backend = make_db(); let hash_a = CandidateHash(Hash::repeat_byte(0x0a)); @@ -648,7 +648,7 @@ mod tests { backend.write(write_ops).unwrap(); let mut overlay_db = OverlayedBackend::new(&backend); - note_current_session(&mut overlay_db, current_session).unwrap(); + note_earliest_session(&mut overlay_db, new_earliest_session).unwrap(); assert_eq!(overlay_db.load_earliest_session().unwrap(), Some(new_earliest_session)); diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index c0f0d3d9e009..28eacffab861 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -169,7 +169,7 @@ impl CandidateVoteState { } /// Create a new `CandidateVoteState` 
from already existing votes. - pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>, now: Timestamp) -> Self { + pub fn new(votes: CandidateVotes, env: &CandidateEnvironment, now: Timestamp) -> Self { let own_vote = OwnVoteState::new(&votes, env); let n_validators = env.validators().len(); diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index ab9faca39868..a9c174921749 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -27,7 +27,7 @@ use sc_keystore::LocalKeystore; use polkadot_node_primitives::{ CandidateVotes, DisputeMessage, DisputeMessageCheckError, DisputeStatus, - SignedDisputeStatement, Timestamp, DISPUTE_WINDOW, + SignedDisputeStatement, Timestamp, }; use polkadot_node_subsystem::{ messages::{ @@ -299,7 +299,7 @@ impl Initialized { self.highest_session = session; - db::v1::note_current_session(overlay_db, session)?; + db::v1::note_earliest_session(overlay_db, new_window_start)?; self.spam_slots.prune_old(new_window_start); } }, @@ -708,25 +708,27 @@ impl Initialized { now: Timestamp, ) -> Result { gum::trace!(target: LOG_TARGET, ?statements, "In handle import statements"); - if session + DISPUTE_WINDOW.get() < self.highest_session { - // It is not valid to participate in an ancient dispute (spam?). + if !self.rolling_session_window.contains(session) { + // It is not valid to participate in an ancient dispute (spam?) or too new. return Ok(ImportStatementsResult::InvalidImport) } - let env = - match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session) - { - None => { - gum::warn!( - target: LOG_TARGET, - session, - "We are lacking a `SessionInfo` for handling import of statements." - ); + let env = match CandidateEnvironment::new( + &self.keystore, + &self.rolling_session_window, + session, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "We are lacking a `SessionInfo` for handling import of statements." + ); - return Ok(ImportStatementsResult::InvalidImport) - }, - Some(env) => env, - }; + return Ok(ImportStatementsResult::InvalidImport) + }, + Some(env) => env, + }; let candidate_hash = candidate_receipt.hash(); @@ -1075,20 +1077,22 @@ impl Initialized { "Issuing local statement for candidate!" ); // Load environment: - let env = - match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session) - { - None => { - gum::warn!( - target: LOG_TARGET, - session, - "Missing info for session which has an active dispute", - ); + let env = match CandidateEnvironment::new( + &self.keystore, + &self.rolling_session_window, + session, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "Missing info for session which has an active dispute", + ); - return Ok(()) - }, - Some(env) => env, - }; + return Ok(()) + }, + Some(env) => env, + }; let votes = overlay_db .load_candidate_votes(session, &candidate_hash)? 
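[Editor's illustrative sketch, not part of the patch: the hunk above replaces the manual `session + DISPUTE_WINDOW.get() < self.highest_session` arithmetic with a single `rolling_session_window.contains(session)` check, which also rejects sessions that are too new. The struct and method names below are hypothetical stand-ins for that containment test, assuming an inclusive window.]

struct RollingWindow {
    earliest_session: u32,
    latest_session: u32,
}

impl RollingWindow {
    // A session is importable only if it lies inside the currently tracked window.
    fn contains(&self, session: u32) -> bool {
        session >= self.earliest_session && session <= self.latest_session
    }
}

fn main() {
    let window = RollingWindow { earliest_session: 90, latest_session: 100 };
    assert!(window.contains(95));
    assert!(!window.contains(42));  // ancient dispute: rejected as likely spam
    assert!(!window.contains(101)); // too new: also rejected
}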
@@ -1257,7 +1261,7 @@ fn make_dispute_message( votes.invalid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( DisputeStatement::Invalid(*statement_kind), - our_vote.candidate_hash().clone(), + *our_vote.candidate_hash(), our_vote.session_index(), validators .get(*validator_index) @@ -1272,7 +1276,7 @@ fn make_dispute_message( votes.valid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( DisputeStatement::Valid(*statement_kind), - our_vote.candidate_hash().clone(), + *our_vote.candidate_hash(), our_vote.session_index(), validators .get(*validator_index) diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 09d6c621b999..e7ac66ce2ece 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -30,7 +30,7 @@ use futures::FutureExt; use sc_keystore::LocalKeystore; -use polkadot_node_primitives::{CandidateVotes, DISPUTE_WINDOW}; +use polkadot_node_primitives::CandidateVotes; use polkadot_node_subsystem::{ overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -272,7 +272,7 @@ impl DisputeCoordinatorSubsystem { ChainScraper, )> { // Prune obsolete disputes: - db::v1::note_current_session(overlay_db, rolling_session_window.latest_session())?; + db::v1::note_earliest_session(overlay_db, rolling_session_window.earliest_session())?; let active_disputes = match overlay_db.load_recent_disputes() { Ok(Some(disputes)) => diff --git a/node/core/dispute-coordinator/src/participation/mod.rs b/node/core/dispute-coordinator/src/participation/mod.rs index 874f37e63213..e923e13e8142 100644 --- a/node/core/dispute-coordinator/src/participation/mod.rs +++ b/node/core/dispute-coordinator/src/participation/mod.rs @@ -235,7 +235,7 @@ impl Participation { req: ParticipationRequest, recent_head: Hash, ) -> FatalResult<()> { - if self.running_participations.insert(req.candidate_hash().clone()) { + if self.running_participations.insert(*req.candidate_hash()) { let sender = ctx.sender().clone(); ctx.spawn( "participation-worker", diff --git a/node/core/dispute-coordinator/src/participation/queues/mod.rs b/node/core/dispute-coordinator/src/participation/queues/mod.rs index d2fcab1ba258..29380bd77df1 100644 --- a/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -204,7 +204,7 @@ impl Queues { // Once https://github.com/rust-lang/rust/issues/62924 is there, we can use a simple: // target.pop_first(). 
if let Some((comparator, _)) = target.iter().next() { - let comparator = comparator.clone(); + let comparator = *comparator; target .remove(&comparator) .map(|participation_request| (comparator, participation_request)) diff --git a/node/core/parachains-inherent/Cargo.toml b/node/core/parachains-inherent/Cargo.toml index c38b45944d5b..3f2afadba97e 100644 --- a/node/core/parachains-inherent/Cargo.toml +++ b/node/core/parachains-inherent/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-parachains-inherent" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index 6fbdc8cf0435..035d62676e51 100644 --- a/node/core/provisioner/Cargo.toml +++ b/node/core/provisioner/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-provisioner" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index dcfcd0d1c2f0..4231dc840c2c 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -99,7 +99,7 @@ where ); // Fetch the onchain disputes. We'll do a prioritization based on them. - let onchain = match get_onchain_disputes(sender, leaf.hash.clone()).await { + let onchain = match get_onchain_disputes(sender, leaf.hash).await { Ok(r) => r, Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => { // Runtime version is checked before calling this method, so the error below should never happen! diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 0530d48aabda..fcb65d66f286 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -373,7 +373,7 @@ async fn send_inherent_data( let disputes = match has_required_runtime( from_job, - leaf.hash.clone(), + leaf.hash, PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT, ) .await @@ -506,7 +506,7 @@ fn select_availability_bitfields( bitfields.len() ); - selected.into_iter().map(|(_, b)| b).collect() + selected.into_values().collect() } /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. 
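[Editor's illustrative sketch, not part of the patch: the provisioner hunk above swaps `selected.into_iter().map(|(_, b)| b)` for the equivalent but clearer `into_values`, which the standard-library map types provide since Rust 1.54. A minimal standalone example of the idiom, using a `BTreeMap` with made-up keys and values:]

use std::collections::BTreeMap;

fn main() {
    let mut selected: BTreeMap<u32, &str> = BTreeMap::new();
    selected.insert(1, "bitfield-a");
    selected.insert(0, "bitfield-b");

    // Consume the map and keep only the values, yielded in key order.
    let values: Vec<&str> = selected.into_values().collect();
    assert_eq!(values, vec!["bitfield-b", "bitfield-a"]);
}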
diff --git a/node/core/pvf-checker/Cargo.toml b/node/core/pvf-checker/Cargo.toml index b0881042e957..a5e46b689526 100644 --- a/node/core/pvf-checker/Cargo.toml +++ b/node/core/pvf-checker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-pvf-checker" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 8bcecf55475e..b88837e0833e 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-core-pvf" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" @@ -13,6 +13,7 @@ always-assert = "0.1" async-std = { version = "1.11.0", features = ["attributes"] } async-process = "1.3.0" assert_matches = "1.4.0" +cpu-time = "1.0.0" futures = "0.3.21" futures-timer = "3.0.2" slotmap = "1.0" @@ -21,10 +22,13 @@ pin-project = "1.0.9" rand = "0.8.5" tempfile = "3.3.0" rayon = "1.5.1" + parity-scale-codec = { version = "3.1.5", default-features = false, features = ["derive"] } + polkadot-parachain = { path = "../../../parachain" } polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-metrics = { path = "../../metrics"} + sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-executor-wasmtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-executor-common = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs index 038d8e803299..413d73b4c558 100644 --- a/node/core/pvf/src/artifacts.rs +++ b/node/core/pvf/src/artifacts.rs @@ -101,6 +101,8 @@ pub enum ArtifactState { /// This is updated when we get the heads up for this artifact or when we just discover /// this file. last_time_needed: SystemTime, + /// The CPU time that was taken preparing this artifact. + cpu_time_elapsed: Duration, }, /// A task to prepare this artifact is scheduled. Preparing { @@ -171,11 +173,16 @@ impl Artifacts { /// This function must be used only for brand-new artifacts and should never be used for /// replacing existing ones. #[cfg(test)] - pub fn insert_prepared(&mut self, artifact_id: ArtifactId, last_time_needed: SystemTime) { + pub fn insert_prepared( + &mut self, + artifact_id: ArtifactId, + last_time_needed: SystemTime, + cpu_time_elapsed: Duration, + ) { // See the precondition. always!(self .artifacts - .insert(artifact_id, ArtifactState::Prepared { last_time_needed }) + .insert(artifact_id, ArtifactState::Prepared { last_time_needed, cpu_time_elapsed }) .is_none()); } diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs index 4aca2da4b3ba..ddcdb2561cfd 100644 --- a/node/core/pvf/src/error.rs +++ b/node/core/pvf/src/error.rs @@ -15,10 +15,11 @@ // along with Polkadot. If not, see . use parity_scale_codec::{Decode, Encode}; -use std::any::Any; +use std::{any::Any, time::Duration}; -/// Result of PVF preparation performed by the validation host. -pub type PrepareResult = Result<(), PrepareError>; +/// Result of PVF preparation performed by the validation host. Contains the elapsed CPU time if +/// successful +pub type PrepareResult = Result; /// An error that occurred during the prepare part of the PVF pipeline. 
#[derive(Debug, Clone, Encode, Decode)] diff --git a/node/core/pvf/src/execute/mod.rs b/node/core/pvf/src/execute/mod.rs index 86e1d79fc951..bc7f035a8b40 100644 --- a/node/core/pvf/src/execute/mod.rs +++ b/node/core/pvf/src/execute/mod.rs @@ -24,4 +24,4 @@ mod queue; mod worker; pub use queue::{start, ToQueue}; -pub use worker::worker_entrypoint; +pub use worker::{worker_entrypoint, Response as ExecuteResponse}; diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index b4c6a66b7719..72b6e450351b 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -225,9 +225,8 @@ fn handle_job_finish( result_tx: ResultSender, ) { let (idle_worker, result) = match outcome { - Outcome::Ok { result_descriptor, duration_ms, idle_worker } => { + Outcome::Ok { result_descriptor, duration: _, idle_worker } => { // TODO: propagate the soft timeout - drop(duration_ms); (Some(idle_worker), Ok(result_descriptor)) }, diff --git a/node/core/pvf/src/execute/worker.rs b/node/core/pvf/src/execute/worker.rs index a0b8337ddc4a..46226a159c26 100644 --- a/node/core/pvf/src/execute/worker.rs +++ b/node/core/pvf/src/execute/worker.rs @@ -18,8 +18,9 @@ use crate::{ artifacts::ArtifactPathId, executor_intf::Executor, worker_common::{ - bytes_to_path, framed_recv, framed_send, path_to_bytes, spawn_with_program_path, - worker_event_loop, IdleWorker, SpawnErr, WorkerHandle, + bytes_to_path, cpu_time_monitor_loop, framed_recv, framed_send, path_to_bytes, + spawn_with_program_path, worker_event_loop, IdleWorker, JobKind, SpawnErr, WorkerHandle, + JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, LOG_TARGET, }; @@ -27,12 +28,21 @@ use async_std::{ io, os::unix::net::UnixStream, path::{Path, PathBuf}, + task, }; +use cpu_time::ProcessTime; use futures::FutureExt; use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::ValidationResult; -use std::time::{Duration, Instant}; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time::Duration, +}; /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. /// @@ -48,7 +58,7 @@ pub async fn spawn( pub enum Outcome { /// PVF execution completed successfully and the result is returned. The worker is ready for /// another job. - Ok { result_descriptor: ValidationResult, duration_ms: u64, idle_worker: IdleWorker }, + Ok { result_descriptor: ValidationResult, duration: Duration, idle_worker: IdleWorker }, /// The candidate validation failed. It may be for example because the wasm execution triggered a trap. /// Errors related to the preparation process are not expected to be encountered by the execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, @@ -80,7 +90,9 @@ pub async fn start_work( artifact.path.display(), ); - if let Err(error) = send_request(&mut stream, &artifact.path, &validation_params).await { + if let Err(error) = + send_request(&mut stream, &artifact.path, &validation_params, execution_timeout).await + { gum::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -91,6 +103,12 @@ pub async fn start_work( return Outcome::IoErr } + // We use a generous timeout here. This is in addition to the one in the child process, in + // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout + // in the child. 
We want to use CPU time because it varies less than wall clock time under + // load, but the CPU resources of the child can only be measured from the parent after the + // child process terminates. + let timeout = execution_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; let response = futures::select! { response = recv_response(&mut stream).fuse() => { match response { @@ -104,25 +122,47 @@ pub async fn start_work( ); return Outcome::IoErr }, - Ok(response) => response, + Ok(response) => { + if let Response::Ok{duration, ..} = response { + if duration > execution_timeout { + // The job didn't complete within the timeout. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "execute job took {}ms cpu time, exceeded execution timeout {}ms.", + duration.as_millis(), + execution_timeout.as_millis(), + ); + + // Return a timeout error. + return Outcome::HardTimeout; + } + } + + response + }, } }, - _ = Delay::new(execution_timeout).fuse() => { + _ = Delay::new(timeout).fuse() => { gum::warn!( target: LOG_TARGET, worker_pid = %pid, validation_code_hash = ?artifact.id.code_hash, "execution worker exceeded alloted time for execution", ); - return Outcome::HardTimeout; + // TODO: This case is not really a hard timeout as the timeout here in the host is + // lenient. Should fix this as part of + // https://github.com/paritytech/polkadot/issues/3754. + Response::TimedOut }, }; match response { - Response::Ok { result_descriptor, duration_ms } => - Outcome::Ok { result_descriptor, duration_ms, idle_worker: IdleWorker { stream, pid } }, + Response::Ok { result_descriptor, duration } => + Outcome::Ok { result_descriptor, duration, idle_worker: IdleWorker { stream, pid } }, Response::InvalidCandidate(err) => Outcome::InvalidCandidate { err, idle_worker: IdleWorker { stream, pid } }, + Response::TimedOut => Outcome::HardTimeout, Response::InternalError(err) => Outcome::InternalError { err, idle_worker: IdleWorker { stream, pid } }, } @@ -132,12 +172,14 @@ async fn send_request( stream: &mut UnixStream, artifact_path: &Path, validation_params: &[u8], + execution_timeout: Duration, ) -> io::Result<()> { framed_send(stream, path_to_bytes(artifact_path)).await?; - framed_send(stream, validation_params).await + framed_send(stream, validation_params).await?; + framed_send(stream, &execution_timeout.encode()).await } -async fn recv_request(stream: &mut UnixStream) -> io::Result<(PathBuf, Vec)> { +async fn recv_request(stream: &mut UnixStream) -> io::Result<(PathBuf, Vec, Duration)> { let artifact_path = framed_recv(stream).await?; let artifact_path = bytes_to_path(&artifact_path).ok_or_else(|| { io::Error::new( @@ -146,7 +188,14 @@ async fn recv_request(stream: &mut UnixStream) -> io::Result<(PathBuf, Vec)> ) })?; let params = framed_recv(stream).await?; - Ok((artifact_path, params)) + let execution_timeout = framed_recv(stream).await?; + let execution_timeout = Duration::decode(&mut &execution_timeout[..]).map_err(|_| { + io::Error::new( + io::ErrorKind::Other, + "execute pvf recv_request: failed to decode duration".to_string(), + ) + })?; + Ok((artifact_path, params, execution_timeout)) } async fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { @@ -164,9 +213,10 @@ async fn recv_response(stream: &mut UnixStream) -> io::Result { } #[derive(Encode, Decode)] -enum Response { - Ok { result_descriptor: ValidationResult, duration_ms: u64 }, +pub enum Response { + Ok { result_descriptor: ValidationResult, duration: Duration }, InvalidCandidate(String), + TimedOut, InternalError(String), 
} @@ -187,15 +237,53 @@ pub fn worker_entrypoint(socket_path: &str) { let executor = Executor::new().map_err(|e| { io::Error::new(io::ErrorKind::Other, format!("cannot create executor: {}", e)) })?; + loop { - let (artifact_path, params) = recv_request(&mut stream).await?; + let (artifact_path, params, execution_timeout) = recv_request(&mut stream).await?; gum::debug!( target: LOG_TARGET, worker_pid = %std::process::id(), "worker: validating artifact {}", artifact_path.display(), ); - let response = validate_using_artifact(&artifact_path, ¶ms, &executor).await; + + // Create a lock flag. We set it when either thread finishes. + let lock = Arc::new(AtomicBool::new(false)); + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. Continuously wakes up from + // sleeping and then either sleeps for the remaining CPU time, or kills the process if + // we exceed the CPU timeout. + let (stream_2, cpu_time_start_2, execution_timeout_2, lock_2) = + (stream.clone(), cpu_time_start, execution_timeout, lock.clone()); + let handle = + thread::Builder::new().name("CPU time monitor".into()).spawn(move || { + task::block_on(async { + cpu_time_monitor_loop( + JobKind::Execute, + stream_2, + cpu_time_start_2, + execution_timeout_2, + lock_2, + ) + .await; + }) + })?; + + let response = + validate_using_artifact(&artifact_path, ¶ms, &executor, cpu_time_start).await; + + let lock_result = + lock.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed); + if lock_result.is_err() { + // The other thread is still sending an error response over the socket. Wait on it + // and return. + let _ = handle.join(); + // Monitor thread detected timeout and likely already terminated the process, + // nothing to do. + continue + } + send_response(&mut stream, response).await?; } }); @@ -205,19 +293,19 @@ async fn validate_using_artifact( artifact_path: &Path, params: &[u8], executor: &Executor, + cpu_time_start: ProcessTime, ) -> Response { - let validation_started_at = Instant::now(); let descriptor_bytes = match unsafe { // SAFETY: this should be safe since the compiled artifact passed here comes from the // file created by the prepare workers. These files are obtained by calling // [`executor_intf::prepare`]. 
executor.execute(artifact_path.as_ref(), params) } { - Err(err) => return Response::format_invalid("execute", &err.to_string()), + Err(err) => return Response::format_invalid("execute", &err), Ok(d) => d, }; - let duration_ms = validation_started_at.elapsed().as_millis() as u64; + let duration = cpu_time_start.elapsed(); let result_descriptor = match ValidationResult::decode(&mut &descriptor_bytes[..]) { Err(err) => @@ -225,5 +313,5 @@ async fn validate_using_artifact( Ok(r) => r, }; - Response::Ok { result_descriptor, duration_ms } + Response::Ok { result_descriptor, duration } } diff --git a/node/core/pvf/src/executor_intf.rs b/node/core/pvf/src/executor_intf.rs index bbeb6195e1dc..c5578f5f81ad 100644 --- a/node/core/pvf/src/executor_intf.rs +++ b/node/core/pvf/src/executor_intf.rs @@ -424,7 +424,7 @@ impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { use parity_scale_codec::Encode; Ok(version.encode()) }, - None => Err(format!("runtime version section is not found")), + None => Err("runtime version section is not found".to_string()), } } } diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index 5c29072da1c3..483419409448 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -218,7 +218,7 @@ pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future { + ArtifactState::Prepared { last_time_needed, cpu_time_elapsed } => { *last_time_needed = SystemTime::now(); - let _ = result_sender.send(Ok(())); + let _ = result_sender.send(Ok(*cpu_time_elapsed)); }, ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), @@ -490,7 +490,7 @@ async fn handle_precheck_pvf( /// /// If the prepare job failed previously, we may retry it under certain conditions. /// -/// When preparing for execution, we use a more lenient timeout ([`EXECUTE_COMPILATION_TIMEOUT`]) +/// When preparing for execution, we use a more lenient timeout ([`EXECUTE_PREPARATION_TIMEOUT`]) /// than when prechecking. async fn handle_execute_pvf( cache_path: &Path, @@ -505,7 +505,7 @@ async fn handle_execute_pvf( if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed } => { + ArtifactState::Prepared { last_time_needed, .. } => { *last_time_needed = SystemTime::now(); // This artifact has already been prepared, send it to the execute queue. @@ -563,7 +563,7 @@ async fn handle_execute_pvf( awaiting_prepare.add(artifact_id, execution_timeout, params, result_tx); } - return Ok(()) + Ok(()) } async fn handle_heads_up( @@ -701,11 +701,12 @@ async fn handle_prepare_done( } *state = match result { - Ok(()) => ArtifactState::Prepared { last_time_needed: SystemTime::now() }, + Ok(cpu_time_elapsed) => + ArtifactState::Prepared { last_time_needed: SystemTime::now(), cpu_time_elapsed }, Err(error) => ArtifactState::FailedToProcess { last_time_failed: SystemTime::now(), num_failures: *num_failures + 1, - error: error.clone(), + error, }, }; @@ -780,7 +781,7 @@ fn can_retry_prepare_after_failure( // Gracefully returned an error, so it will probably be reproducible. Don't retry. Prevalidation(_) | Preparation(_) => false, // Retry if the retry cooldown has elapsed and if we have already retried less than - // `NUM_PREPARE_RETRIES` times. + // `NUM_PREPARE_RETRIES` times. IO errors may resolve themselves. 
Panic(_) | TimedOut | DidNotMakeIt => SystemTime::now() >= last_time_failed + PREPARE_FAILURE_COOLDOWN && num_failures <= NUM_PREPARE_RETRIES, @@ -1016,8 +1017,8 @@ mod tests { let mut builder = Builder::default(); builder.cleanup_pulse_interval = Duration::from_millis(100); builder.artifact_ttl = Duration::from_millis(500); - builder.artifacts.insert_prepared(artifact_id(1), mock_now); - builder.artifacts.insert_prepared(artifact_id(2), mock_now); + builder.artifacts.insert_prepared(artifact_id(1), mock_now, Duration::default()); + builder.artifacts.insert_prepared(artifact_id(2), mock_now, Duration::default()); let mut test = builder.build(); let mut host = test.host_handle(); @@ -1087,7 +1088,10 @@ mod tests { ); test.from_prepare_queue_tx - .send(prepare::FromQueue { artifact_id: artifact_id(1), result: Ok(()) }) + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Ok(Duration::default()), + }) .await .unwrap(); let result_tx_pvf_1_1 = assert_matches!( @@ -1100,7 +1104,10 @@ mod tests { ); test.from_prepare_queue_tx - .send(prepare::FromQueue { artifact_id: artifact_id(2), result: Ok(()) }) + .send(prepare::FromQueue { + artifact_id: artifact_id(2), + result: Ok(Duration::default()), + }) .await .unwrap(); let result_tx_pvf_2 = assert_matches!( @@ -1149,13 +1156,16 @@ mod tests { ); // Send `Ok` right away and poll the host. test.from_prepare_queue_tx - .send(prepare::FromQueue { artifact_id: artifact_id(1), result: Ok(()) }) + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Ok(Duration::default()), + }) .await .unwrap(); // No pending execute requests. test.poll_ensure_to_execute_queue_is_empty().await; // Received the precheck result. - assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Ok(())); + assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Ok(_)); // Send multiple requests for the same PVF. let mut precheck_receivers = Vec::new(); @@ -1253,7 +1263,10 @@ mod tests { prepare::ToQueue::Enqueue { .. } ); test.from_prepare_queue_tx - .send(prepare::FromQueue { artifact_id: artifact_id(2), result: Ok(()) }) + .send(prepare::FromQueue { + artifact_id: artifact_id(2), + result: Ok(Duration::default()), + }) .await .unwrap(); // The execute queue receives new request, preckecking is finished and we can @@ -1263,7 +1276,7 @@ mod tests { execute::ToQueue::Enqueue { .. 
} ); for result_rx in precheck_receivers { - assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Ok(())); + assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Ok(_)); } } @@ -1511,7 +1524,10 @@ mod tests { ); test.from_prepare_queue_tx - .send(prepare::FromQueue { artifact_id: artifact_id(1), result: Ok(()) }) + .send(prepare::FromQueue { + artifact_id: artifact_id(1), + result: Ok(Duration::default()), + }) .await .unwrap(); diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs index 20965ec7dbd7..8db105d895ea 100644 --- a/node/core/pvf/src/metrics.rs +++ b/node/core/pvf/src/metrics.rs @@ -183,6 +183,9 @@ impl metrics::Metrics for Metrics { ).buckets(vec![ // This is synchronized with `APPROVAL_EXECUTION_TIMEOUT` and // `BACKING_EXECUTION_TIMEOUT` constants in `node/primitives/src/lib.rs` + 0.01, + 0.025, + 0.05, 0.1, 0.25, 0.5, @@ -192,6 +195,9 @@ impl metrics::Metrics for Metrics { 4.0, 5.0, 6.0, + 8.0, + 10.0, + 12.0, ]), )?, registry, diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index ae0757d80461..df0a8ec41883 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -364,16 +364,14 @@ async fn handle_worker_concluded( // the pool up to the hard cap. spawn_extra_worker(queue, false).await?; } + } else if queue.limits.should_cull(queue.workers.len() + queue.spawn_inflight) { + // We no longer need services of this worker. Kill it. + queue.workers.remove(worker); + send_pool(&mut queue.to_pool_tx, pool::ToPool::Kill(worker)).await?; } else { - if queue.limits.should_cull(queue.workers.len() + queue.spawn_inflight) { - // We no longer need services of this worker. Kill it. - queue.workers.remove(worker); - send_pool(&mut queue.to_pool_tx, pool::ToPool::Kill(worker)).await?; - } else { - // see if there are more work available and schedule it. - if let Some(job) = queue.unscheduled.next() { - assign(queue, worker, job).await?; - } + // see if there are more work available and schedule it. + if let Some(job) = queue.unscheduled.next() { + assign(queue, worker, job).await?; } } @@ -618,7 +616,11 @@ mod tests { let w = test.workers.insert(()); test.send_from_pool(pool::FromPool::Spawned(w)); - test.send_from_pool(pool::FromPool::Concluded { worker: w, rip: false, result: Ok(()) }); + test.send_from_pool(pool::FromPool::Concluded { + worker: w, + rip: false, + result: Ok(Duration::default()), + }); assert_eq!(test.poll_and_recv_from_queue().await.artifact_id, pvf(1).as_artifact_id()); } @@ -647,7 +649,11 @@ mod tests { assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); - test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, result: Ok(()) }); + test.send_from_pool(pool::FromPool::Concluded { + worker: w1, + rip: false, + result: Ok(Duration::default()), + }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); @@ -693,7 +699,11 @@ mod tests { // That's a bit silly in this context, but in production there will be an entire pool up // to the `soft_capacity` of workers and it doesn't matter which one to cull. Either way, // we just check that edge case of an edge case works. 
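One detail worth spelling out about the protocol changes in this PR: both workers now receive their timeout as a SCALE-encoded `Duration` over the socket (see the `send_request`/`recv_request` changes further down) and report the elapsed CPU time back the same way, which is why the queue tests above now conclude with `Ok(Duration::default())`. A minimal round-trip sketch, assuming only `parity-scale-codec`'s `Duration` impl that the diff itself relies on:

    use parity_scale_codec::{Decode, Encode};
    use std::time::Duration;

    fn main() {
        // Host side: encode the CPU-time budget before writing it to the socket.
        let timeout = Duration::from_secs(3);
        let bytes = timeout.encode();

        // Worker side: decode it back out of the received frame.
        let decoded = Duration::decode(&mut &bytes[..]).expect("valid SCALE encoding");
        assert_eq!(decoded, timeout);
    }
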
- test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, result: Ok(()) }); + test.send_from_pool(pool::FromPool::Concluded { + worker: w1, + rip: false, + result: Ok(Duration::default()), + }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Kill(w1)); } @@ -719,7 +729,11 @@ mod tests { assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); // Conclude worker 1 and rip it. - test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: true, result: Ok(()) }); + test.send_from_pool(pool::FromPool::Concluded { + worker: w1, + rip: true, + result: Ok(Duration::default()), + }); // Since there is still work, the queue requested one extra worker to spawn to handle the // remaining enqueued work items. diff --git a/node/core/pvf/src/prepare/worker.rs b/node/core/pvf/src/prepare/worker.rs index 1cf512894740..4e0c411e45de 100644 --- a/node/core/pvf/src/prepare/worker.rs +++ b/node/core/pvf/src/prepare/worker.rs @@ -18,8 +18,9 @@ use crate::{ artifacts::CompiledArtifact, error::{PrepareError, PrepareResult}, worker_common::{ - bytes_to_path, framed_recv, framed_send, path_to_bytes, spawn_with_program_path, - tmpfile_in, worker_event_loop, IdleWorker, SpawnErr, WorkerHandle, + bytes_to_path, cpu_time_monitor_loop, framed_recv, framed_send, path_to_bytes, + spawn_with_program_path, tmpfile_in, worker_event_loop, IdleWorker, JobKind, SpawnErr, + WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, LOG_TARGET, }; @@ -27,10 +28,20 @@ use async_std::{ io, os::unix::net::UnixStream, path::{Path, PathBuf}, + task, }; +use cpu_time::ProcessTime; use parity_scale_codec::{Decode, Encode}; use sp_core::hexdisplay::HexDisplay; -use std::{panic, sync::Arc, time::Duration}; +use std::{ + panic, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time::Duration, +}; /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. /// @@ -58,6 +69,13 @@ pub enum Outcome { DidNotMakeIt, } +#[derive(Debug)] +enum Selected { + Done(PrepareResult), + IoErr, + Deadline, +} + /// Given the idle token of a worker and parameters of work, communicates with the worker and /// returns the outcome. pub async fn start_work( @@ -77,7 +95,7 @@ pub async fn start_work( ); with_tmp_file(pid, cache_path, |tmp_file| async move { - if let Err(err) = send_request(&mut stream, code, &tmp_file).await { + if let Err(err) = send_request(&mut stream, code, &tmp_file, preparation_timeout).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -88,78 +106,52 @@ pub async fn start_work( } // Wait for the result from the worker, keeping in mind that there may be a timeout, the - // worker may get killed, or something along these lines. + // worker may get killed, or something along these lines. In that case we should propagate + // the error to the pool. // - // In that case we should propagate the error to the pool. + // We use a generous timeout here. This is in addition to the one in the child process, in + // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout + // in the child. We want to use CPU time because it varies less than wall clock time under + // load, but the CPU resources of the child can only be measured from the parent after the + // child process terminates. 
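The comment above describes the two-timeout scheme this PR introduces: the child enforces a CPU-time budget, while the host only keeps a lenient wall-clock backstop. Reduced to two helpers, it looks like the sketch below; only `JOB_TIMEOUT_WALL_CLOCK_FACTOR = 4` is taken from the diff, the function names are illustrative:

    use std::time::Duration;

    /// Multiple of the CPU-time budget the host is willing to wait in wall-clock time.
    const JOB_TIMEOUT_WALL_CLOCK_FACTOR: u32 = 4;

    /// The wall-clock deadline used on the host, in case the child stalls entirely.
    fn wall_clock_deadline(cpu_budget: Duration) -> Duration {
        cpu_budget * JOB_TIMEOUT_WALL_CLOCK_FACTOR
    }

    /// Even if a response arrives within the wall-clock deadline, the job counts as
    /// timed out when the CPU time it reports exceeds the budget.
    fn exceeded_cpu_budget(reported_cpu_time: Duration, cpu_budget: Duration) -> bool {
        reported_cpu_time > cpu_budget
    }
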
+ let timeout = preparation_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; + let result = async_std::future::timeout(timeout, framed_recv(&mut stream)).await; - #[derive(Debug)] - enum Selected { - Done(PrepareResult), - IoErr, - Deadline, - } - - let selected = - match async_std::future::timeout(preparation_timeout, framed_recv(&mut stream)).await { - Ok(Ok(response_bytes)) => { - // Received bytes from worker within the time limit. - // By convention we expect encoded `PrepareResult`. - if let Ok(result) = PrepareResult::decode(&mut response_bytes.as_slice()) { - if result.is_ok() { - gum::debug!( - target: LOG_TARGET, - worker_pid = %pid, - "promoting WIP artifact {} to {}", - tmp_file.display(), - artifact_path.display(), - ); - - async_std::fs::rename(&tmp_file, &artifact_path) - .await - .map(|_| Selected::Done(result)) - .unwrap_or_else(|err| { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "failed to rename the artifact from {} to {}: {:?}", - tmp_file.display(), - artifact_path.display(), - err, - ); - Selected::IoErr - }) - } else { - Selected::Done(result) - } - } else { - // We received invalid bytes from the worker. - let bound_bytes = &response_bytes[..response_bytes.len().min(4)]; - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "received unexpected response from the prepare worker: {}", - HexDisplay::from(&bound_bytes), - ); - Selected::IoErr - } - }, - Ok(Err(err)) => { - // Communication error within the time limit. - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "failed to recv a prepare response: {:?}", - err, - ); - Selected::IoErr - }, - Err(_) => { - // Timed out. - Selected::Deadline - }, - }; + let selected = match result { + // Received bytes from worker within the time limit. + Ok(Ok(response_bytes)) => + handle_response_bytes( + response_bytes, + pid, + tmp_file, + artifact_path, + preparation_timeout, + ) + .await, + Ok(Err(err)) => { + // Communication error within the time limit. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "failed to recv a prepare response: {:?}", + err, + ); + Selected::IoErr + }, + Err(_) => { + // Timed out here on the host. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "did not recv a prepare response within the time limit", + ); + Selected::Deadline + }, + }; match selected { + // Timed out on the child. This should already be logged by the child. + Selected::Done(Err(PrepareError::TimedOut)) => Outcome::TimedOut, Selected::Done(result) => Outcome::Concluded { worker: IdleWorker { stream, pid }, result }, Selected::Deadline => Outcome::TimedOut, @@ -169,6 +161,76 @@ pub async fn start_work( .await } +/// Handles the case where we successfully received response bytes on the host from the child. +async fn handle_response_bytes( + response_bytes: Vec, + pid: u32, + tmp_file: PathBuf, + artifact_path: PathBuf, + preparation_timeout: Duration, +) -> Selected { + // By convention we expect encoded `PrepareResult`. + let result = match PrepareResult::decode(&mut response_bytes.as_slice()) { + Ok(result) => result, + Err(_) => { + // We received invalid bytes from the worker. 
+ let bound_bytes = &response_bytes[..response_bytes.len().min(4)]; + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "received unexpected response from the prepare worker: {}", + HexDisplay::from(&bound_bytes), + ); + return Selected::IoErr + }, + }; + let cpu_time_elapsed = match result { + Ok(result) => result, + Err(_) => return Selected::Done(result), + }; + + if cpu_time_elapsed > preparation_timeout { + // The job didn't complete within the timeout. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "prepare job took {}ms cpu time, exceeded preparation timeout {}ms. Clearing WIP artifact {}", + cpu_time_elapsed.as_millis(), + preparation_timeout.as_millis(), + tmp_file.display(), + ); + + // Return a timeout error. + // + // NOTE: The artifact exists, but is located in a temporary file which + // will be cleared by `with_tmp_file`. + return Selected::Deadline + } + + gum::debug!( + target: LOG_TARGET, + worker_pid = %pid, + "promoting WIP artifact {} to {}", + tmp_file.display(), + artifact_path.display(), + ); + + async_std::fs::rename(&tmp_file, &artifact_path) + .await + .map(|_| Selected::Done(result)) + .unwrap_or_else(|err| { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "failed to rename the artifact from {} to {}: {:?}", + tmp_file.display(), + artifact_path.display(), + err, + ); + Selected::IoErr + }) +} + /// Create a temporary file for an artifact at the given cache path and execute the given /// future/closure passing the file path in. /// @@ -218,13 +280,15 @@ async fn send_request( stream: &mut UnixStream, code: Arc>, tmp_file: &Path, + preparation_timeout: Duration, ) -> io::Result<()> { - framed_send(stream, &*code).await?; + framed_send(stream, &code).await?; framed_send(stream, path_to_bytes(tmp_file)).await?; + framed_send(stream, &preparation_timeout.encode()).await?; Ok(()) } -async fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, PathBuf)> { +async fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, PathBuf, Duration)> { let code = framed_recv(stream).await?; let tmp_file = framed_recv(stream).await?; let tmp_file = bytes_to_path(&tmp_file).ok_or_else(|| { @@ -233,7 +297,14 @@ async fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, PathBuf)> "prepare pvf recv_request: non utf-8 artifact path".to_string(), ) })?; - Ok((code, tmp_file)) + let preparation_timeout = framed_recv(stream).await?; + let preparation_timeout = Duration::decode(&mut &preparation_timeout[..]).map_err(|_| { + io::Error::new( + io::ErrorKind::Other, + "prepare pvf recv_request: failed to decode duration".to_string(), + ) + })?; + Ok((code, tmp_file, preparation_timeout)) } /// The entrypoint that the spawned prepare worker should start with. The `socket_path` specifies @@ -241,7 +312,7 @@ async fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, PathBuf)> pub fn worker_entrypoint(socket_path: &str) { worker_event_loop("prepare", socket_path, |mut stream| async move { loop { - let (code, dest) = recv_request(&mut stream).await?; + let (code, dest, preparation_timeout) = recv_request(&mut stream).await?; gum::debug!( target: LOG_TARGET, @@ -249,18 +320,54 @@ pub fn worker_entrypoint(socket_path: &str) { "worker: preparing artifact", ); - let result = match prepare_artifact(&code) { + // Create a lock flag. We set it when either thread finishes. + let lock = Arc::new(AtomicBool::new(false)); + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. 
Continuously wakes up from + // sleeping and then either sleeps for the remaining CPU time, or kills the process if + // we exceed the CPU timeout. + let (stream_2, cpu_time_start_2, preparation_timeout_2, lock_2) = + (stream.clone(), cpu_time_start, preparation_timeout, lock.clone()); + let handle = + thread::Builder::new().name("CPU time monitor".into()).spawn(move || { + task::block_on(async { + cpu_time_monitor_loop( + JobKind::Prepare, + stream_2, + cpu_time_start_2, + preparation_timeout_2, + lock_2, + ) + .await; + }) + })?; + + // Prepares the artifact in a separate thread. + let result = match prepare_artifact(&code).await { Err(err) => { // Serialized error will be written into the socket. Err(err) }, Ok(compiled_artifact) => { + let cpu_time_elapsed = cpu_time_start.elapsed(); + + let lock_result = + lock.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed); + if lock_result.is_err() { + // The other thread is still sending an error response over the socket. Wait on it and + // return. + let _ = handle.join(); + // Monitor thread detected timeout and likely already terminated the + // process, nothing to do. + continue + } + // Write the serialized artifact into a temp file. - // PVF host only keeps artifacts statuses in its memory, - // successfully compiled code gets stored on the disk (and - // consequently deserialized by execute-workers). The prepare - // worker is only required to send an empty `Ok` to the pool - // to indicate the success. + // + // PVF host only keeps artifacts statuses in its memory, successfully compiled code gets stored + // on the disk (and consequently deserialized by execute-workers). The prepare worker is only + // required to send `Ok` to the pool to indicate the success. gum::debug!( target: LOG_TARGET, @@ -270,7 +377,7 @@ pub fn worker_entrypoint(socket_path: &str) { ); async_std::fs::write(&dest, &compiled_artifact).await?; - Ok(()) + Ok(cpu_time_elapsed) }, }; @@ -279,7 +386,7 @@ pub fn worker_entrypoint(socket_path: &str) { }); } -fn prepare_artifact(code: &[u8]) -> Result { +async fn prepare_artifact(code: &[u8]) -> Result { panic::catch_unwind(|| { let blob = match crate::executor_intf::prevalidate(code) { Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), diff --git a/node/core/pvf/src/testing.rs b/node/core/pvf/src/testing.rs index 3b64d130fc6a..cbd37b06d403 100644 --- a/node/core/pvf/src/testing.rs +++ b/node/core/pvf/src/testing.rs @@ -34,7 +34,7 @@ pub fn validate_candidate( let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024) .expect("Decompressing code failed"); - let blob = prevalidate(&*code)?; + let blob = prevalidate(&code)?; let artifact = prepare(blob)?; let tmpdir = tempfile::tempdir()?; let artifact_path = tmpdir.path().join("blob"); diff --git a/node/core/pvf/src/worker_common.rs b/node/core/pvf/src/worker_common.rs index 572e3717832b..55c91a64424d 100644 --- a/node/core/pvf/src/worker_common.rs +++ b/node/core/pvf/src/worker_common.rs @@ -16,25 +16,54 @@ //! Common logic for implementation of worker processes. 
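The additions to this file below (`cpu_time_monitor_loop` plus the `AtomicBool` each worker shares with it) boil down to a small "who finished first" handshake between the job and its monitor thread. A stripped-down sketch of just that handshake, with illustrative names and a sleep standing in for the CPU-time check:

    use std::{
        sync::{
            atomic::{AtomicBool, Ordering},
            Arc,
        },
        thread,
        time::Duration,
    };

    fn main() {
        // Set to `true` by whichever side finishes first.
        let finished = Arc::new(AtomicBool::new(false));

        let monitor_flag = finished.clone();
        let monitor = thread::spawn(move || {
            // Stand-in for "sleep until the CPU-time budget would be exhausted".
            thread::sleep(Duration::from_millis(50));
            // Try to claim the flag. `Err` means the job completed first, so the
            // monitor must not report a timeout (or kill the process).
            if monitor_flag
                .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                println!("monitor: job exceeded its CPU-time budget");
            }
        });

        // The job side: if it cannot claim the flag, the monitor already reported a
        // timeout over the socket, so the result must be discarded.
        if finished
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            println!("job: finished first, sending its result");
        }

        let _ = monitor.join();
    }
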
-use crate::LOG_TARGET; +use crate::{execute::ExecuteResponse, PrepareError, LOG_TARGET}; use async_std::{ io, os::unix::net::{UnixListener, UnixStream}, path::{Path, PathBuf}, }; +use cpu_time::ProcessTime; use futures::{ never::Never, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, FutureExt as _, }; use futures_timer::Delay; +use parity_scale_codec::Encode; use pin_project::pin_project; use rand::Rng; use std::{ fmt, mem, pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, task::{Context, Poll}, time::Duration, }; +/// A multiple of the job timeout (in CPU time) for which we are willing to wait on the host (in +/// wall clock time). This is lenient because CPU time may go slower than wall clock time. +pub const JOB_TIMEOUT_WALL_CLOCK_FACTOR: u32 = 4; + +/// Some allowed overhead that we account for in the "CPU time monitor" thread's sleeps, on the +/// child process. +pub const JOB_TIMEOUT_OVERHEAD: Duration = Duration::from_millis(50); + +#[derive(Copy, Clone, Debug)] +pub enum JobKind { + Prepare, + Execute, +} + +impl fmt::Display for JobKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Prepare => write!(f, "prepare"), + Self::Execute => write!(f, "execute"), + } + } +} + /// This is publicly exposed only for integration tests. #[doc(hidden)] pub async fn spawn_with_program_path( @@ -169,6 +198,74 @@ where ); } +/// Loop that runs in the CPU time monitor thread on prepare and execute jobs. Continuously wakes up +/// from sleeping and then either sleeps for the remaining CPU time, or kills the process if we +/// exceed the CPU timeout. +/// +/// NOTE: Killed processes are detected and cleaned up in `purge_dead`. +/// +/// NOTE: If the job completes and this thread is still sleeping, it will continue sleeping in the +/// background. When it wakes, it will see that the flag has been set and return. +pub async fn cpu_time_monitor_loop( + job_kind: JobKind, + mut stream: UnixStream, + cpu_time_start: ProcessTime, + timeout: Duration, + lock: Arc, +) { + loop { + let cpu_time_elapsed = cpu_time_start.elapsed(); + + // Treat the timeout as CPU time, which is less subject to variance due to load. + if cpu_time_elapsed > timeout { + let result = lock.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed); + if result.is_err() { + // Hit the job-completed case first, return from this thread. + return + } + + // Log if we exceed the timeout. + gum::warn!( + target: LOG_TARGET, + worker_pid = %std::process::id(), + "{job_kind} job took {}ms cpu time, exceeded {job_kind} timeout {}ms", + cpu_time_elapsed.as_millis(), + timeout.as_millis(), + ); + + // Send back a TimedOut error on timeout. + let encoded_result = match job_kind { + JobKind::Prepare => { + let result: Result<(), PrepareError> = Err(PrepareError::TimedOut); + result.encode() + }, + JobKind::Execute => { + let result = ExecuteResponse::TimedOut; + result.encode() + }, + }; + // If we error there is nothing else we can do here, and we are killing the process, + // anyway. The receiving side will just have to time out. + if let Err(err) = framed_send(&mut stream, encoded_result.as_slice()).await { + gum::warn!( + target: LOG_TARGET, + worker_pid = %std::process::id(), + "{job_kind} worker -> pvf host: error sending result over the socket: {:?}", + err + ); + } + + // Kill the process. + std::process::exit(1); + } + + // Sleep for the remaining CPU time, plus a bit to account for overhead. Note that the sleep + // is wall clock time. 
The CPU clock may be slower than the wall clock. + let sleep_interval = timeout - cpu_time_elapsed + JOB_TIMEOUT_OVERHEAD; + std::thread::sleep(sleep_interval); + } +} + /// A struct that represents an idle worker. /// /// This struct is supposed to be used as a token that is passed by move into a subroutine that @@ -200,8 +297,8 @@ pub enum SpawnErr { /// This is a representation of a potentially running worker. Drop it and the process will be killed. /// /// A worker's handle is also a future that resolves when it's detected that the worker's process -/// has been terminated. Since the worker is running in another process it is obviously not necessary -/// to poll this future to make the worker run, it's only for termination detection. +/// has been terminated. Since the worker is running in another process it is obviously not +/// necessary to poll this future to make the worker run, it's only for termination detection. /// /// This future relies on the fact that a child process's stdout `fd` is closed upon it's termination. #[pin_project] diff --git a/node/core/pvf/tests/it/adder.rs b/node/core/pvf/tests/it/adder.rs index 83cbd27b6ed5..69b6b7d21979 100644 --- a/node/core/pvf/tests/it/adder.rs +++ b/node/core/pvf/tests/it/adder.rs @@ -23,7 +23,7 @@ use polkadot_parachain::primitives::{ }; #[async_std::test] -async fn execute_good_on_parent() { +async fn execute_good_block_on_parent() { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; let block_data = BlockData { state: 0, add: 512 }; @@ -89,7 +89,7 @@ async fn execute_good_chain_on_parent() { } #[async_std::test] -async fn execute_bad_on_parent() { +async fn execute_bad_block_on_parent() { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; let block_data = BlockData { diff --git a/node/core/pvf/tests/it/main.rs b/node/core/pvf/tests/it/main.rs index bf0983d50874..a6aaf5d369d4 100644 --- a/node/core/pvf/tests/it/main.rs +++ b/node/core/pvf/tests/it/main.rs @@ -101,6 +101,7 @@ async fn terminates_on_timeout() { #[async_std::test] async fn parallel_execution() { + // Run some jobs that do not complete, thus timing out. let host = TestHost::new(); let execute_pvf_future_1 = host.validate_candidate( halt::wasm_binary_unwrap(), @@ -124,11 +125,14 @@ async fn parallel_execution() { let start = std::time::Instant::now(); let (_, _) = futures::join!(execute_pvf_future_1, execute_pvf_future_2); - // total time should be < 2 x EXECUTION_TIMEOUT_SEC - const EXECUTION_TIMEOUT_SEC: u64 = 3; + // Total time should be < 2 x TEST_EXECUTION_TIMEOUT (two workers run in parallel). + let duration = std::time::Instant::now().duration_since(start); + let max_duration = 2 * TEST_EXECUTION_TIMEOUT; assert!( - std::time::Instant::now().duration_since(start) < - std::time::Duration::from_secs(EXECUTION_TIMEOUT_SEC * 2) + duration < max_duration, + "Expected duration {}ms to be less than {}ms", + duration.as_millis(), + max_duration.as_millis() ); } @@ -141,6 +145,7 @@ async fn execute_queue_doesnt_stall_if_workers_died() { // Here we spawn 8 validation jobs for the `halt` PVF and share those between 5 workers. The // first five jobs should timeout and the workers killed. For the next 3 jobs a new batch of // workers should be spun up. 
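The integration tests above deliberately time with `std::time::Instant` (wall clock), while the workers time themselves with `cpu_time::ProcessTime` (CPU clock), exactly the distinction the monitor-loop comment makes. A tiny sketch measuring the same work both ways (the busy loop is just an illustration):

    use cpu_time::ProcessTime;
    use std::time::Instant;

    fn main() {
        let wall_start = Instant::now();
        let cpu_start = ProcessTime::now();

        // Some CPU-bound stand-in work.
        let mut acc = 0u64;
        for i in 0..50_000_000u64 {
            acc = acc.wrapping_add(i);
        }

        // Under load, wall-clock time can be much larger than CPU time; the workers
        // therefore budget in CPU time and keep only a lenient wall-clock backstop.
        println!(
            "acc={acc}, wall={}ms, cpu={}ms",
            wall_start.elapsed().as_millis(),
            cpu_start.elapsed().as_millis()
        );
    }
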
+ let start = std::time::Instant::now(); futures::future::join_all((0u8..=8).map(|_| { host.validate_candidate( halt::wasm_binary_unwrap(), @@ -153,4 +158,15 @@ async fn execute_queue_doesnt_stall_if_workers_died() { ) })) .await; + + // Total time should be >= 2 x TEST_EXECUTION_TIMEOUT (two separate sets of workers that should + // both timeout). + let duration = std::time::Instant::now().duration_since(start); + let max_duration = 2 * TEST_EXECUTION_TIMEOUT; + assert!( + duration >= max_duration, + "Expected duration {}ms to be greater than or equal to {}ms", + duration.as_millis(), + max_duration.as_millis() + ); } diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml index ff7ea662603c..e828d7c4c7dd 100644 --- a/node/core/runtime-api/Cargo.toml +++ b/node/core/runtime-api/Cargo.toml @@ -1,14 +1,13 @@ [package] name = "polkadot-node-core-runtime-api" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } -memory-lru = "0.1.1" -parity-util-mem = { version = "0.12.0", default-features = false } +lru = "0.8" sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 0fe9b74dc86d..d202b46d0da3 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::collections::btree_map::BTreeMap; +use std::{collections::btree_map::BTreeMap, num::NonZeroUsize}; -use memory_lru::{MemoryLruCache, ResidentSize}; -use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; +use lru::LruCache; use sp_consensus_babe::Epoch; use polkadot_primitives::v2::{ @@ -28,126 +27,67 @@ use polkadot_primitives::v2::{ ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; -const AUTHORITIES_CACHE_SIZE: usize = 128 * 1024; -const VALIDATORS_CACHE_SIZE: usize = 64 * 1024; -const VALIDATOR_GROUPS_CACHE_SIZE: usize = 64 * 1024; -const AVAILABILITY_CORES_CACHE_SIZE: usize = 64 * 1024; -const PERSISTED_VALIDATION_DATA_CACHE_SIZE: usize = 64 * 1024; -const ASSUMED_VALIDATION_DATA_CACHE_SIZE: usize = 64 * 1024; -const CHECK_VALIDATION_OUTPUTS_CACHE_SIZE: usize = 64 * 1024; -const SESSION_INDEX_FOR_CHILD_CACHE_SIZE: usize = 64 * 1024; -const VALIDATION_CODE_CACHE_SIZE: usize = 10 * 1024 * 1024; -const CANDIDATE_PENDING_AVAILABILITY_CACHE_SIZE: usize = 64 * 1024; -const CANDIDATE_EVENTS_CACHE_SIZE: usize = 64 * 1024; -const SESSION_INFO_CACHE_SIZE: usize = 64 * 1024; -const DMQ_CONTENTS_CACHE_SIZE: usize = 64 * 1024; -const INBOUND_HRMP_CHANNELS_CACHE_SIZE: usize = 64 * 1024; -const CURRENT_BABE_EPOCH_CACHE_SIZE: usize = 64 * 1024; -const ON_CHAIN_VOTES_CACHE_SIZE: usize = 3 * 1024; -const PVFS_REQUIRE_PRECHECK_SIZE: usize = 1024; -const VALIDATION_CODE_HASH_CACHE_SIZE: usize = 64 * 1024; -const VERSION_CACHE_SIZE: usize = 4 * 1024; -const DISPUTES_CACHE_SIZE: usize = 64 * 1024; - -struct ResidentSizeOf(T); - -impl ResidentSize for ResidentSizeOf { - fn resident_size(&self) -> usize { - std::mem::size_of::() + self.0.malloc_size_of() - } -} - -struct DoesNotAllocate(T); - -impl ResidentSize for DoesNotAllocate { - fn resident_size(&self) -> usize { - std::mem::size_of::() - } -} - -// this is an ugly workaround for `AuthorityDiscoveryId` -// not implementing `MallocSizeOf` -struct 
VecOfDoesNotAllocate(Vec); - -impl ResidentSize for VecOfDoesNotAllocate { - fn resident_size(&self) -> usize { - std::mem::size_of::() * self.0.capacity() - } -} +/// For consistency we have the same capacity for all caches. We use 128 as we'll only need that +/// much if finality stalls (we only query state for unfinalized blocks + maybe latest finalized). +/// In any case, a cache is an optimization. We should avoid a situation where having a large cache +/// leads to OOM or puts pressure on other important stuff like PVF execution/preparation. +const DEFAULT_CACHE_CAP: NonZeroUsize = match NonZeroUsize::new(128) { + Some(cap) => cap, + None => panic!("lru capacity must be non-zero"), +}; pub(crate) struct RequestResultCache { - authorities: MemoryLruCache>, - validators: MemoryLruCache>>, - validator_groups: - MemoryLruCache>, GroupRotationInfo)>>, - availability_cores: MemoryLruCache>>, - persisted_validation_data: MemoryLruCache< - (Hash, ParaId, OccupiedCoreAssumption), - ResidentSizeOf>, - >, - assumed_validation_data: MemoryLruCache< - (ParaId, Hash), - ResidentSizeOf>, - >, - check_validation_outputs: - MemoryLruCache<(Hash, ParaId, CandidateCommitments), ResidentSizeOf>, - session_index_for_child: MemoryLruCache>, - validation_code: MemoryLruCache< - (Hash, ParaId, OccupiedCoreAssumption), - ResidentSizeOf>, - >, - validation_code_by_hash: - MemoryLruCache>>, - candidate_pending_availability: - MemoryLruCache<(Hash, ParaId), ResidentSizeOf>>, - candidate_events: MemoryLruCache>>, - session_info: MemoryLruCache>, - dmq_contents: - MemoryLruCache<(Hash, ParaId), ResidentSizeOf>>>, - inbound_hrmp_channels_contents: MemoryLruCache< - (Hash, ParaId), - ResidentSizeOf>>>, - >, - current_babe_epoch: MemoryLruCache>, - on_chain_votes: MemoryLruCache>>, - pvfs_require_precheck: MemoryLruCache>>, - validation_code_hash: MemoryLruCache< - (Hash, ParaId, OccupiedCoreAssumption), - ResidentSizeOf>, - >, - version: MemoryLruCache>, - disputes: MemoryLruCache< - Hash, - ResidentSizeOf)>>, - >, + authorities: LruCache>, + validators: LruCache>, + validator_groups: LruCache>, GroupRotationInfo)>, + availability_cores: LruCache>, + persisted_validation_data: + LruCache<(Hash, ParaId, OccupiedCoreAssumption), Option>, + assumed_validation_data: + LruCache<(ParaId, Hash), Option<(PersistedValidationData, ValidationCodeHash)>>, + check_validation_outputs: LruCache<(Hash, ParaId, CandidateCommitments), bool>, + session_index_for_child: LruCache, + validation_code: LruCache<(Hash, ParaId, OccupiedCoreAssumption), Option>, + validation_code_by_hash: LruCache>, + candidate_pending_availability: LruCache<(Hash, ParaId), Option>, + candidate_events: LruCache>, + session_info: LruCache, + dmq_contents: LruCache<(Hash, ParaId), Vec>>, + inbound_hrmp_channels_contents: + LruCache<(Hash, ParaId), BTreeMap>>>, + current_babe_epoch: LruCache, + on_chain_votes: LruCache>, + pvfs_require_precheck: LruCache>, + validation_code_hash: + LruCache<(Hash, ParaId, OccupiedCoreAssumption), Option>, + version: LruCache, + disputes: LruCache)>>, } impl Default for RequestResultCache { fn default() -> Self { Self { - authorities: MemoryLruCache::new(AUTHORITIES_CACHE_SIZE), - validators: MemoryLruCache::new(VALIDATORS_CACHE_SIZE), - validator_groups: MemoryLruCache::new(VALIDATOR_GROUPS_CACHE_SIZE), - availability_cores: MemoryLruCache::new(AVAILABILITY_CORES_CACHE_SIZE), - persisted_validation_data: MemoryLruCache::new(PERSISTED_VALIDATION_DATA_CACHE_SIZE), - assumed_validation_data: 
MemoryLruCache::new(ASSUMED_VALIDATION_DATA_CACHE_SIZE), - check_validation_outputs: MemoryLruCache::new(CHECK_VALIDATION_OUTPUTS_CACHE_SIZE), - session_index_for_child: MemoryLruCache::new(SESSION_INDEX_FOR_CHILD_CACHE_SIZE), - validation_code: MemoryLruCache::new(VALIDATION_CODE_CACHE_SIZE), - validation_code_by_hash: MemoryLruCache::new(VALIDATION_CODE_CACHE_SIZE), - candidate_pending_availability: MemoryLruCache::new( - CANDIDATE_PENDING_AVAILABILITY_CACHE_SIZE, - ), - candidate_events: MemoryLruCache::new(CANDIDATE_EVENTS_CACHE_SIZE), - session_info: MemoryLruCache::new(SESSION_INFO_CACHE_SIZE), - dmq_contents: MemoryLruCache::new(DMQ_CONTENTS_CACHE_SIZE), - inbound_hrmp_channels_contents: MemoryLruCache::new(INBOUND_HRMP_CHANNELS_CACHE_SIZE), - current_babe_epoch: MemoryLruCache::new(CURRENT_BABE_EPOCH_CACHE_SIZE), - on_chain_votes: MemoryLruCache::new(ON_CHAIN_VOTES_CACHE_SIZE), - pvfs_require_precheck: MemoryLruCache::new(PVFS_REQUIRE_PRECHECK_SIZE), - validation_code_hash: MemoryLruCache::new(VALIDATION_CODE_HASH_CACHE_SIZE), - version: MemoryLruCache::new(VERSION_CACHE_SIZE), - disputes: MemoryLruCache::new(DISPUTES_CACHE_SIZE), + authorities: LruCache::new(DEFAULT_CACHE_CAP), + validators: LruCache::new(DEFAULT_CACHE_CAP), + validator_groups: LruCache::new(DEFAULT_CACHE_CAP), + availability_cores: LruCache::new(DEFAULT_CACHE_CAP), + persisted_validation_data: LruCache::new(DEFAULT_CACHE_CAP), + assumed_validation_data: LruCache::new(DEFAULT_CACHE_CAP), + check_validation_outputs: LruCache::new(DEFAULT_CACHE_CAP), + session_index_for_child: LruCache::new(DEFAULT_CACHE_CAP), + validation_code: LruCache::new(DEFAULT_CACHE_CAP), + validation_code_by_hash: LruCache::new(DEFAULT_CACHE_CAP), + candidate_pending_availability: LruCache::new(DEFAULT_CACHE_CAP), + candidate_events: LruCache::new(DEFAULT_CACHE_CAP), + session_info: LruCache::new(DEFAULT_CACHE_CAP), + dmq_contents: LruCache::new(DEFAULT_CACHE_CAP), + inbound_hrmp_channels_contents: LruCache::new(DEFAULT_CACHE_CAP), + current_babe_epoch: LruCache::new(DEFAULT_CACHE_CAP), + on_chain_votes: LruCache::new(DEFAULT_CACHE_CAP), + pvfs_require_precheck: LruCache::new(DEFAULT_CACHE_CAP), + validation_code_hash: LruCache::new(DEFAULT_CACHE_CAP), + version: LruCache::new(DEFAULT_CACHE_CAP), + disputes: LruCache::new(DEFAULT_CACHE_CAP), } } } @@ -157,7 +97,7 @@ impl RequestResultCache { &mut self, relay_parent: &Hash, ) -> Option<&Vec> { - self.authorities.get(relay_parent).map(|v| &v.0) + self.authorities.get(relay_parent) } pub(crate) fn cache_authorities( @@ -165,22 +105,22 @@ impl RequestResultCache { relay_parent: Hash, authorities: Vec, ) { - self.authorities.insert(relay_parent, VecOfDoesNotAllocate(authorities)); + self.authorities.put(relay_parent, authorities); } pub(crate) fn validators(&mut self, relay_parent: &Hash) -> Option<&Vec> { - self.validators.get(relay_parent).map(|v| &v.0) + self.validators.get(relay_parent) } pub(crate) fn cache_validators(&mut self, relay_parent: Hash, validators: Vec) { - self.validators.insert(relay_parent, ResidentSizeOf(validators)); + self.validators.put(relay_parent, validators); } pub(crate) fn validator_groups( &mut self, relay_parent: &Hash, ) -> Option<&(Vec>, GroupRotationInfo)> { - self.validator_groups.get(relay_parent).map(|v| &v.0) + self.validator_groups.get(relay_parent) } pub(crate) fn cache_validator_groups( @@ -188,22 +128,22 @@ impl RequestResultCache { relay_parent: Hash, groups: (Vec>, GroupRotationInfo), ) { - self.validator_groups.insert(relay_parent, 
ResidentSizeOf(groups)); + self.validator_groups.put(relay_parent, groups); } pub(crate) fn availability_cores(&mut self, relay_parent: &Hash) -> Option<&Vec> { - self.availability_cores.get(relay_parent).map(|v| &v.0) + self.availability_cores.get(relay_parent) } pub(crate) fn cache_availability_cores(&mut self, relay_parent: Hash, cores: Vec) { - self.availability_cores.insert(relay_parent, ResidentSizeOf(cores)); + self.availability_cores.put(relay_parent, cores); } pub(crate) fn persisted_validation_data( &mut self, key: (Hash, ParaId, OccupiedCoreAssumption), ) -> Option<&Option> { - self.persisted_validation_data.get(&key).map(|v| &v.0) + self.persisted_validation_data.get(&key) } pub(crate) fn cache_persisted_validation_data( @@ -211,14 +151,14 @@ impl RequestResultCache { key: (Hash, ParaId, OccupiedCoreAssumption), data: Option, ) { - self.persisted_validation_data.insert(key, ResidentSizeOf(data)); + self.persisted_validation_data.put(key, data); } pub(crate) fn assumed_validation_data( &mut self, key: (Hash, ParaId, Hash), ) -> Option<&Option<(PersistedValidationData, ValidationCodeHash)>> { - self.assumed_validation_data.get(&(key.1, key.2)).map(|v| &v.0) + self.assumed_validation_data.get(&(key.1, key.2)) } pub(crate) fn cache_assumed_validation_data( @@ -226,14 +166,14 @@ impl RequestResultCache { key: (ParaId, Hash), data: Option<(PersistedValidationData, ValidationCodeHash)>, ) { - self.assumed_validation_data.insert(key, ResidentSizeOf(data)); + self.assumed_validation_data.put(key, data); } pub(crate) fn check_validation_outputs( &mut self, key: (Hash, ParaId, CandidateCommitments), ) -> Option<&bool> { - self.check_validation_outputs.get(&key).map(|v| &v.0) + self.check_validation_outputs.get(&key) } pub(crate) fn cache_check_validation_outputs( @@ -241,11 +181,11 @@ impl RequestResultCache { key: (Hash, ParaId, CandidateCommitments), value: bool, ) { - self.check_validation_outputs.insert(key, ResidentSizeOf(value)); + self.check_validation_outputs.put(key, value); } pub(crate) fn session_index_for_child(&mut self, relay_parent: &Hash) -> Option<&SessionIndex> { - self.session_index_for_child.get(relay_parent).map(|v| &v.0) + self.session_index_for_child.get(relay_parent) } pub(crate) fn cache_session_index_for_child( @@ -253,14 +193,14 @@ impl RequestResultCache { relay_parent: Hash, index: SessionIndex, ) { - self.session_index_for_child.insert(relay_parent, ResidentSizeOf(index)); + self.session_index_for_child.put(relay_parent, index); } pub(crate) fn validation_code( &mut self, key: (Hash, ParaId, OccupiedCoreAssumption), ) -> Option<&Option> { - self.validation_code.get(&key).map(|v| &v.0) + self.validation_code.get(&key) } pub(crate) fn cache_validation_code( @@ -268,7 +208,7 @@ impl RequestResultCache { key: (Hash, ParaId, OccupiedCoreAssumption), value: Option, ) { - self.validation_code.insert(key, ResidentSizeOf(value)); + self.validation_code.put(key, value); } // the actual key is `ValidationCodeHash` (`Hash` is ignored), @@ -277,7 +217,7 @@ impl RequestResultCache { &mut self, key: (Hash, ValidationCodeHash), ) -> Option<&Option> { - self.validation_code_by_hash.get(&key.1).map(|v| &v.0) + self.validation_code_by_hash.get(&key.1) } pub(crate) fn cache_validation_code_by_hash( @@ -285,14 +225,14 @@ impl RequestResultCache { key: ValidationCodeHash, value: Option, ) { - self.validation_code_by_hash.insert(key, ResidentSizeOf(value)); + self.validation_code_by_hash.put(key, value); } pub(crate) fn candidate_pending_availability( &mut self, key: (Hash, 
ParaId), ) -> Option<&Option> { - self.candidate_pending_availability.get(&key).map(|v| &v.0) + self.candidate_pending_availability.get(&key) } pub(crate) fn cache_candidate_pending_availability( @@ -300,11 +240,11 @@ impl RequestResultCache { key: (Hash, ParaId), value: Option, ) { - self.candidate_pending_availability.insert(key, ResidentSizeOf(value)); + self.candidate_pending_availability.put(key, value); } pub(crate) fn candidate_events(&mut self, relay_parent: &Hash) -> Option<&Vec> { - self.candidate_events.get(relay_parent).map(|v| &v.0) + self.candidate_events.get(relay_parent) } pub(crate) fn cache_candidate_events( @@ -312,22 +252,22 @@ impl RequestResultCache { relay_parent: Hash, events: Vec, ) { - self.candidate_events.insert(relay_parent, ResidentSizeOf(events)); + self.candidate_events.put(relay_parent, events); } pub(crate) fn session_info(&mut self, key: SessionIndex) -> Option<&SessionInfo> { - self.session_info.get(&key).map(|v| &v.0) + self.session_info.get(&key) } pub(crate) fn cache_session_info(&mut self, key: SessionIndex, value: SessionInfo) { - self.session_info.insert(key, ResidentSizeOf(value)); + self.session_info.put(key, value); } pub(crate) fn dmq_contents( &mut self, key: (Hash, ParaId), ) -> Option<&Vec>> { - self.dmq_contents.get(&key).map(|v| &v.0) + self.dmq_contents.get(&key) } pub(crate) fn cache_dmq_contents( @@ -335,14 +275,14 @@ impl RequestResultCache { key: (Hash, ParaId), value: Vec>, ) { - self.dmq_contents.insert(key, ResidentSizeOf(value)); + self.dmq_contents.put(key, value); } pub(crate) fn inbound_hrmp_channels_contents( &mut self, key: (Hash, ParaId), ) -> Option<&BTreeMap>>> { - self.inbound_hrmp_channels_contents.get(&key).map(|v| &v.0) + self.inbound_hrmp_channels_contents.get(&key) } pub(crate) fn cache_inbound_hrmp_channel_contents( @@ -350,22 +290,22 @@ impl RequestResultCache { key: (Hash, ParaId), value: BTreeMap>>, ) { - self.inbound_hrmp_channels_contents.insert(key, ResidentSizeOf(value)); + self.inbound_hrmp_channels_contents.put(key, value); } pub(crate) fn current_babe_epoch(&mut self, relay_parent: &Hash) -> Option<&Epoch> { - self.current_babe_epoch.get(relay_parent).map(|v| &v.0) + self.current_babe_epoch.get(relay_parent) } pub(crate) fn cache_current_babe_epoch(&mut self, relay_parent: Hash, epoch: Epoch) { - self.current_babe_epoch.insert(relay_parent, DoesNotAllocate(epoch)); + self.current_babe_epoch.put(relay_parent, epoch); } pub(crate) fn on_chain_votes( &mut self, relay_parent: &Hash, ) -> Option<&Option> { - self.on_chain_votes.get(relay_parent).map(|v| &v.0) + self.on_chain_votes.get(relay_parent) } pub(crate) fn cache_on_chain_votes( @@ -373,14 +313,14 @@ impl RequestResultCache { relay_parent: Hash, scraped: Option, ) { - self.on_chain_votes.insert(relay_parent, ResidentSizeOf(scraped)); + self.on_chain_votes.put(relay_parent, scraped); } pub(crate) fn pvfs_require_precheck( &mut self, relay_parent: &Hash, ) -> Option<&Vec> { - self.pvfs_require_precheck.get(relay_parent).map(|v| &v.0) + self.pvfs_require_precheck.get(relay_parent) } pub(crate) fn cache_pvfs_require_precheck( @@ -388,14 +328,14 @@ impl RequestResultCache { relay_parent: Hash, pvfs: Vec, ) { - self.pvfs_require_precheck.insert(relay_parent, ResidentSizeOf(pvfs)) + self.pvfs_require_precheck.put(relay_parent, pvfs); } pub(crate) fn validation_code_hash( &mut self, key: (Hash, ParaId, OccupiedCoreAssumption), ) -> Option<&Option> { - self.validation_code_hash.get(&key).map(|v| &v.0) + self.validation_code_hash.get(&key) } pub(crate) fn 
cache_validation_code_hash( @@ -403,22 +343,22 @@ impl RequestResultCache { key: (Hash, ParaId, OccupiedCoreAssumption), value: Option, ) { - self.validation_code_hash.insert(key, ResidentSizeOf(value)); + self.validation_code_hash.put(key, value); } pub(crate) fn version(&mut self, relay_parent: &Hash) -> Option<&u32> { - self.version.get(&relay_parent).map(|v| &v.0) + self.version.get(relay_parent) } pub(crate) fn cache_version(&mut self, key: Hash, value: u32) { - self.version.insert(key, ResidentSizeOf(value)); + self.version.put(key, value); } pub(crate) fn disputes( &mut self, relay_parent: &Hash, ) -> Option<&Vec<(SessionIndex, CandidateHash, DisputeState)>> { - self.disputes.get(relay_parent).map(|v| &v.0) + self.disputes.get(relay_parent) } pub(crate) fn cache_disputes( @@ -426,7 +366,7 @@ impl RequestResultCache { relay_parent: Hash, value: Vec<(SessionIndex, CandidateHash, DisputeState)>, ) { - self.disputes.insert(relay_parent, ResidentSizeOf(value)); + self.disputes.put(relay_parent, value); } } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 36355b5759e6..de42ace3af0c 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -268,7 +268,7 @@ where let (sender, receiver) = oneshot::channel(); // TODO: make the cache great again https://github.com/paritytech/polkadot/issues/5546 - let request = match self.query_cache(relay_parent.clone(), request) { + let request = match self.query_cache(relay_parent, request) { Some(request) => request, None => return, }; diff --git a/node/gum/Cargo.toml b/node/gum/Cargo.toml index c9338cd71a81..a42116154bad 100644 --- a/node/gum/Cargo.toml +++ b/node/gum/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tracing-gum" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Stick logs together with the TraceID as provided by tempo" diff --git a/node/gum/proc-macro/Cargo.toml b/node/gum/proc-macro/Cargo.toml index 7fa597f759fe..2b4402a3828f 100644 --- a/node/gum/proc-macro/Cargo.toml +++ b/node/gum/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tracing-gum-proc-macro" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." diff --git a/node/jaeger/Cargo.toml b/node/jaeger/Cargo.toml index 2042d6cb60d7..8faabfad8579 100644 --- a/node/jaeger/Cargo.toml +++ b/node/jaeger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-jaeger" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo" diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index 2f78fa43bdf2..5b477e58b6fd 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -2,7 +2,7 @@ name = "polkadot-test-malus" description = "Misbehaving nodes for local testnets, system and Simnet tests." 
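Editor's note on the `RequestResultCache` changes above: the patch swaps the old memory-size-bounded cache (values wrapped in `ResidentSizeOf`/`DoesNotAllocate`) for a plain least-recently-used cache bounded by entry count, which is why every `get(..).map(|v| &v.0)` collapses to a bare `get(..)` and every `insert(.., ResidentSizeOf(..))` becomes `put(.., ..)`. A minimal sketch of the same pattern, assuming a recent version of the `lru` crate (where `LruCache::new` takes a `NonZeroUsize`); the field name, key/value types, and the capacity of 64 entries are illustrative, not taken from the patch:

```rust
use std::num::NonZeroUsize;
use lru::LruCache;

// Hypothetical key/value types standing in for the (Hash, ParaId, ...) tuples
// and runtime API results used in the real cache.
type Key = u64;
type Value = Vec<u8>;

struct ResultCache {
    // Bounded by number of entries instead of resident memory size.
    availability_cores: LruCache<Key, Value>,
}

impl ResultCache {
    fn new() -> Self {
        Self { availability_cores: LruCache::new(NonZeroUsize::new(64).expect("64 > 0")) }
    }

    // Values are stored directly, so no `.map(|v| &v.0)` unwrapping is needed.
    fn availability_cores(&mut self, key: &Key) -> Option<&Value> {
        self.availability_cores.get(key)
    }

    fn cache_availability_cores(&mut self, key: Key, value: Value) {
        self.availability_cores.put(key, value);
    }
}
```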
license = "GPL-3.0-only" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" readme = "README.md" diff --git a/node/metrics/Cargo.toml b/node/metrics/Cargo.toml index 26ea24766e53..21cfbaa16d4b 100644 --- a/node/metrics/Cargo.toml +++ b/node/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-metrics" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Subsystem metric helpers" diff --git a/node/metrics/src/metronome.rs b/node/metrics/src/metronome.rs index 9184f7ac20ad..ac47e20319d8 100644 --- a/node/metrics/src/metronome.rs +++ b/node/metrics/src/metronome.rs @@ -49,7 +49,7 @@ impl futures::Stream for Metronome { loop { match self.state { MetronomeState::SetAlarm => { - let val = self.period.clone(); + let val = self.period; self.delay.reset(val); self.state = MetronomeState::Snooze; }, diff --git a/node/network/approval-distribution/Cargo.toml b/node/network/approval-distribution/Cargo.toml index b6efa097ac28..e52b03faa525 100644 --- a/node/network/approval-distribution/Cargo.toml +++ b/node/network/approval-distribution/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-approval-distribution" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 5afae66ae818..017538cae5f3 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -309,7 +309,7 @@ enum MessageSource { impl MessageSource { fn peer_id(&self) -> Option { match self { - Self::Peer(id) => Some(id.clone()), + Self::Peer(id) => Some(*id), Self::Local => None, } } @@ -389,7 +389,7 @@ impl State { ) { let mut new_hashes = HashSet::new(); for meta in &metas { - match self.blocks.entry(meta.hash.clone()) { + match self.blocks.entry(meta.hash) { hash_map::Entry::Vacant(entry) => { let candidates_count = meta.candidates.len(); let mut candidates = Vec::with_capacity(candidates_count); @@ -398,7 +398,7 @@ impl State { entry.insert(BlockEntry { known_by: HashMap::new(), number: meta.number, - parent_hash: meta.parent_hash.clone(), + parent_hash: meta.parent_hash, knowledge: Knowledge::default(), candidates, session: meta.session, @@ -406,7 +406,7 @@ impl State { self.topologies.inc_session_refs(meta.session); - new_hashes.insert(meta.hash.clone()); + new_hashes.insert(meta.hash); // In case there are duplicates, we should only set this if the entry // was vacant. 
@@ -433,7 +433,7 @@ impl State { &mut self.blocks, &self.topologies, self.peer_views.len(), - peer_id.clone(), + *peer_id, view_intersection, rng, ) @@ -563,10 +563,8 @@ impl State { "Pending assignment", ); - pending.push(( - peer_id.clone(), - PendingMessage::Assignment(assignment, claimed_index), - )); + pending + .push((peer_id, PendingMessage::Assignment(assignment, claimed_index))); continue } @@ -574,7 +572,7 @@ impl State { self.import_and_circulate_assignment( ctx, metrics, - MessageSource::Peer(peer_id.clone()), + MessageSource::Peer(peer_id), assignment, claimed_index, rng, @@ -604,7 +602,7 @@ impl State { "Pending approval", ); - pending.push((peer_id.clone(), PendingMessage::Approval(approval_vote))); + pending.push((peer_id, PendingMessage::Approval(approval_vote))); continue } @@ -612,7 +610,7 @@ impl State { self.import_and_circulate_approval( ctx, metrics, - MessageSource::Peer(peer_id.clone()), + MessageSource::Peer(peer_id), approval_vote, ) .await; @@ -663,7 +661,7 @@ impl State { &mut self.blocks, &self.topologies, self.peer_views.len(), - peer_id.clone(), + peer_id, view, rng, ) @@ -709,7 +707,7 @@ impl State { ) where R: CryptoRng + Rng, { - let block_hash = assignment.block_hash.clone(); + let block_hash = assignment.block_hash; let validator_index = assignment.validator; let entry = match self.blocks.get_mut(&block_hash) { @@ -737,7 +735,7 @@ impl State { if let Some(peer_id) = source.peer_id() { // check if our knowledge of the peer already contains this assignment - match entry.known_by.entry(peer_id.clone()) { + match entry.known_by.entry(peer_id) { hash_map::Entry::Occupied(mut peer_knowledge) => { let peer_knowledge = peer_knowledge.get_mut(); if peer_knowledge.contains(&message_subject, message_kind) { @@ -761,13 +759,13 @@ impl State { ?message_subject, "Assignment from a peer is out of view", ); - modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; }, } // if the assignment is known to be valid, reward the peer if entry.knowledge.contains(&message_subject, message_kind) { - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment"); peer_knowledge.received.insert(message_subject, message_kind); @@ -803,8 +801,7 @@ impl State { ); match result { AssignmentCheckResult::Accepted => { - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST) - .await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await; entry.knowledge.known_messages.insert(message_subject.clone(), message_kind); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { peer_knowledge.received.insert(message_subject.clone(), message_kind); @@ -970,7 +967,7 @@ impl State { source: MessageSource, vote: IndirectSignedApprovalVote, ) { - let block_hash = vote.block_hash.clone(); + let block_hash = vote.block_hash; let validator_index = vote.validator; let candidate_index = vote.candidate_index; @@ -1003,7 +1000,7 @@ impl State { } // check if our knowledge of the peer already contains this approval - match entry.known_by.entry(peer_id.clone()) { + match entry.known_by.entry(peer_id) { hash_map::Entry::Occupied(mut knowledge) => { let peer_knowledge = knowledge.get_mut(); if 
peer_knowledge.contains(&message_subject, message_kind) { @@ -1027,14 +1024,14 @@ impl State { ?message_subject, "Approval from a peer is out of view", ); - modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; }, } // if the approval is known to be valid, reward the peer if entry.knowledge.contains(&message_subject, message_kind) { gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval"); - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { peer_knowledge.received.insert(message_subject.clone(), message_kind); } @@ -1065,8 +1062,7 @@ impl State { ); match result { ApprovalCheckResult::Accepted => { - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST) - .await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await; entry.knowledge.insert(message_subject.clone(), message_kind); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { @@ -1301,7 +1297,7 @@ impl State { break } - let peer_knowledge = entry.known_by.entry(peer_id.clone()).or_default(); + let peer_knowledge = entry.known_by.entry(peer_id).or_default(); let topology = topologies.get_topology(entry.session); @@ -1335,13 +1331,12 @@ impl State { } } - let message_subject = - MessageSubject(block.clone(), candidate_index, validator.clone()); + let message_subject = MessageSubject(block, candidate_index, *validator); let assignment_message = ( IndirectAssignmentCert { - block_hash: block.clone(), - validator: validator.clone(), + block_hash: block, + validator: *validator, cert: message_state.approval_state.assignment_cert().clone(), }, candidate_index, @@ -1350,8 +1345,8 @@ impl State { let approval_message = message_state.approval_state.approval_signature().map(|signature| { IndirectSignedApprovalVote { - block_hash: block.clone(), - validator: validator.clone(), + block_hash: block, + validator: *validator, candidate_index, signature, } @@ -1374,7 +1369,7 @@ impl State { } } - block = entry.parent_hash.clone(); + block = entry.parent_hash; } } @@ -1388,7 +1383,7 @@ impl State { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer_id.clone()], + vec![peer_id], Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send), )), @@ -1558,13 +1553,12 @@ async fn adjust_required_routing_and_propagate"] edition = "2021" diff --git a/node/network/availability-recovery/Cargo.toml b/node/network/availability-recovery/Cargo.toml index a82f2360a0fc..d82145fe5ed2 100644 --- a/node/network/availability-recovery/Cargo.toml +++ b/node/network/availability-recovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-availability-recovery" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index 5a2905082379..38acfbe88ff9 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -338,8 +338,7 @@ impl RequestChunksFromValidators { index: validator_index, }; - let (req, res) = - OutgoingRequest::new(Recipient::Authority(validator), raw_request.clone()); + let (req, res) = 
OutgoingRequest::new(Recipient::Authority(validator), raw_request); requests.push(Requests::ChunkFetchingV1(req)); params.metrics.on_chunk_request_issued(); @@ -973,7 +972,7 @@ async fn query_full_data( ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx)) .await; - Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?) + rx.await.map_err(error::Error::CanceledQueryFullData) } #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index 6e819fbe7cf4..2bfd031765ee 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-availability-bitfield-distribution" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 2a1e3b8d9ef3..1bd9230a3787 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -319,7 +319,7 @@ async fn handle_bitfield_distribution( } let validator_index = signed_availability.validator_index(); - let validator = if let Some(validator) = validator_set.get(*&validator_index.0 as usize) { + let validator = if let Some(validator) = validator_set.get(validator_index.0 as usize) { validator.clone() } else { gum::debug!(target: LOG_TARGET, validator_index = ?validator_index.0, "Could not find a validator for index"); @@ -395,7 +395,7 @@ async fn relay_message( }; if need_routing { - Some(peer.clone()) + Some(*peer) } else { None } @@ -412,7 +412,7 @@ async fn relay_message( // track the message as sent for this peer job_data .message_sent_to_peer - .entry(peer.clone()) + .entry(*peer) .or_default() .insert(validator.clone()); }); @@ -497,7 +497,7 @@ async fn process_incoming_peer_message( // Check if the peer already sent us a message for the validator denoted in the message earlier. // Must be done after validator index verification, in order to avoid storing an unbounded // number of set entries. - let received_set = job_data.message_received_from_peer.entry(origin.clone()).or_default(); + let received_set = job_data.message_received_from_peer.entry(origin).or_default(); if !received_set.contains(&validator) { received_set.insert(validator.clone()); @@ -656,7 +656,7 @@ async fn handle_peer_view_change( ) { let added = state .peer_views - .entry(origin.clone()) + .entry(origin) .or_default() .replace_difference(view) .cloned() @@ -681,11 +681,10 @@ async fn handle_peer_view_change( let delta_set: Vec<(ValidatorId, BitfieldGossipMessage)> = added .into_iter() .filter_map(|new_relay_parent_interest| { - if let Some(job_data) = (&*state).per_relay_parent.get(&new_relay_parent_interest) { + if let Some(job_data) = state.per_relay_parent.get(&new_relay_parent_interest) { // Send all jointly known messages for a validator (given the current relay parent) // to the peer `origin`... let one_per_validator = job_data.one_per_validator.clone(); - let origin = origin.clone(); Some(one_per_validator.into_iter().filter(move |(validator, _message)| { // ..except for the ones the peer already has. 
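Editor's note on the `query_full_data` change in the availability-recovery hunk above: it drops a redundant `Ok(..?)` wrapper. When the inner expression already evaluates to the function's `Result` type, propagating with `?` only to re-wrap in `Ok` is a no-op (clippy's `needless_question_mark`). A minimal, self-contained illustration with a made-up error type:

```rust
#[derive(Debug)]
enum Error {
    Canceled,
}

fn query_before(rx: Result<u32, Error>) -> Result<u32, Error> {
    // Unwraps with `?` only to re-wrap in `Ok` again.
    Ok(rx?)
}

fn query_after(rx: Result<u32, Error>) -> Result<u32, Error> {
    // The value already has the right type, so just return it.
    rx
}

fn main() {
    assert_eq!(query_before(Ok(1)).unwrap(), query_after(Ok(1)).unwrap());
}
```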
job_data.message_from_validator_needed_by_peer(&origin, validator) @@ -699,7 +698,7 @@ async fn handle_peer_view_change( .collect(); for (validator, message) in delta_set.into_iter() { - send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await; + send_tracked_gossip_message(ctx, state, origin, validator, message).await; } } @@ -727,11 +726,7 @@ async fn send_tracked_gossip_message( "Sending gossip message" ); - job_data - .message_sent_to_peer - .entry(dest.clone()) - .or_default() - .insert(validator.clone()); + job_data.message_sent_to_peer.entry(dest).or_default().insert(validator.clone()); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![dest], @@ -760,14 +755,14 @@ async fn query_basics( // query validators ctx.send_message(RuntimeApiMessage::Request( - relay_parent.clone(), + relay_parent, RuntimeApiRequest::Validators(validators_tx), )) .await; // query signing context ctx.send_message(RuntimeApiMessage::Request( - relay_parent.clone(), + relay_parent, RuntimeApiRequest::SessionIndexForChild(session_tx), )) .await; diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml index 4ca402536000..ff932203e834 100644 --- a/node/network/bridge/Cargo.toml +++ b/node/network/bridge/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-network-bridge" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 9b326cbbfb38..32dc79d25814 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -174,7 +174,7 @@ impl Network for Arc> { Ok(v) => v, Err(_) => continue, }; - NetworkService::add_known_address(&*self, peer_id.clone(), addr); + NetworkService::add_known_address(self, peer_id, addr); found_peer_id = Some(peer_id); } found_peer_id @@ -197,7 +197,7 @@ impl Network for Arc> { }; NetworkService::start_request( - &*self, + self, peer_id, req_protocol_names.get_name(protocol), payload, diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 8adbcf857811..1d3052d3a218 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -213,7 +213,7 @@ where PeerSet::Collation => &mut shared.collation_peers, }; - match peer_map.entry(peer.clone()) { + match peer_map.entry(peer) { hash_map::Entry::Occupied(_) => continue, hash_map::Entry::Vacant(vacant) => { vacant.insert(PeerData { view: View::default(), version }); @@ -234,12 +234,12 @@ where dispatch_validation_events_to_all( vec![ NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, role, version, maybe_authority, ), - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), ], &mut sender, ) @@ -259,12 +259,12 @@ where dispatch_collation_events_to_all( vec![ NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, role, version, maybe_authority, ), - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), ], &mut sender, ) @@ -421,7 +421,7 @@ where Some(ValidationVersion::V1.into()) { handle_v1_peer_messages::( - remote.clone(), + remote, PeerSet::Validation, &mut shared.0.lock().validation_peers, v_messages, @@ -442,7 +442,7 @@ where }; for report in reports { - network_service.report_peer(remote.clone(), report); + network_service.report_peer(remote, report); } dispatch_validation_events_to_all(events, &mut 
sender).await; @@ -454,7 +454,7 @@ where Some(CollationVersion::V1.into()) { handle_v1_peer_messages::( - remote.clone(), + remote, PeerSet::Collation, &mut shared.0.lock().collation_peers, c_messages, @@ -475,7 +475,7 @@ where }; for report in reports { - network_service.report_peer(remote.clone(), report); + network_service.report_peer(remote, report); } dispatch_collation_events_to_all(events, &mut sender).await; @@ -795,11 +795,11 @@ fn handle_v1_peer_messages>( } else { peer_data.view = new_view; - NetworkBridgeEvent::PeerViewChange(peer.clone(), peer_data.view.clone()) + NetworkBridgeEvent::PeerViewChange(peer, peer_data.view.clone()) } }, WireMessage::ProtocolMessage(message) => - NetworkBridgeEvent::PeerMessage(peer.clone(), message.into()), + NetworkBridgeEvent::PeerMessage(peer, message.into()), }) } diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index cae4ec2b1730..d66dcb6443c0 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-collator-protocol" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 7a603a8a404a..f7b27583a6dd 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -561,7 +561,7 @@ async fn advertise_collation( let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent); ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage( - vec![peer.clone()], + vec![peer], Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)), )) .await; @@ -707,11 +707,8 @@ async fn handle_incoming_peer_message( "AdvertiseCollation message is not expected on the collator side of the protocol", ); - ctx.send_message(NetworkBridgeTxMessage::ReportPeer( - origin.clone(), - COST_UNEXPECTED_MESSAGE, - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::ReportPeer(origin, COST_UNEXPECTED_MESSAGE)) + .await; // If we are advertised to, this is another collator, and we should disconnect. 
ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) @@ -838,14 +835,14 @@ async fn handle_peer_view_change( peer_id: PeerId, view: View, ) { - let current = state.peer_views.entry(peer_id.clone()).or_default(); + let current = state.peer_views.entry(peer_id).or_default(); let added: Vec = view.difference(&*current).cloned().collect(); *current = view; for added in added.into_iter() { - advertise_collation(ctx, state, added, peer_id.clone()).await; + advertise_collation(ctx, state, added, peer_id).await; } } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index b2b3dc4824b5..1442fbcc2bcb 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -287,7 +287,7 @@ impl PeerData { PeerState::Collating(ref mut state) => if state.advertisements.insert(on_relay_parent) { state.last_active = Instant::now(); - Ok((state.collator_id.clone(), state.para_id.clone())) + Ok((state.collator_id.clone(), state.para_id)) } else { Err(AdvertisementError::Duplicate) }, @@ -375,22 +375,19 @@ impl ActiveParas { .await .await .ok() - .map(|x| x.ok()) - .flatten(); + .and_then(|x| x.ok()); let mg = polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender) .await .await .ok() - .map(|x| x.ok()) - .flatten(); + .and_then(|x| x.ok()); let mc = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender) .await .await .ok() - .map(|x| x.ok()) - .flatten(); + .and_then(|x| x.ok()); let (validators, groups, rotation_info, cores) = match (mv, mg, mc) { (Some(v), Some((g, r)), Some(c)) => (v, g, r, c), @@ -486,12 +483,7 @@ struct PendingCollation { impl PendingCollation { fn new(relay_parent: Hash, para_id: &ParaId, peer_id: &PeerId) -> Self { - Self { - relay_parent, - para_id: para_id.clone(), - peer_id: peer_id.clone(), - commitments_hash: None, - } + Self { relay_parent, para_id: *para_id, peer_id: *peer_id, commitments_hash: None } } } @@ -629,9 +621,9 @@ fn collator_peer_id( peer_data: &HashMap, collator_id: &CollatorId, ) -> Option { - peer_data.iter().find_map(|(peer, data)| { - data.collator_id().filter(|c| c == &collator_id).map(|_| peer.clone()) - }) + peer_data + .iter() + .find_map(|(peer, data)| data.collator_id().filter(|c| c == &collator_id).map(|_| *peer)) } async fn disconnect_peer(sender: &mut impl overseer::CollatorProtocolSenderTrait, peer_id: PeerId) { @@ -655,9 +647,7 @@ async fn fetch_collation( Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await; (collator_id, relay_parent) }; - state - .collation_fetch_timeouts - .push(timeout(id.clone(), relay_parent.clone()).boxed()); + state.collation_fetch_timeouts.push(timeout(id.clone(), relay_parent).boxed()); if let Some(peer_data) = state.peer_data.get(&peer_id) { if peer_data.has_advertised(&relay_parent) { @@ -729,7 +719,7 @@ async fn notify_collation_seconded( /// - Ongoing collation requests have to be canceled. /// - Advertisements by this peer that are no longer relevant have to be removed. 
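Editor's note on the `ActiveParas` changes in the validator-side collator protocol above: `.map(|x| x.ok()).flatten()` is replaced by `.and_then(|x| x.ok())`, which expresses the same operation directly (clippy's `map_flatten`). A small equivalence check, with made-up values:

```rust
fn main() {
    let responses: Vec<Option<Result<u32, ()>>> = vec![None, Some(Err(())), Some(Ok(7))];

    for r in responses {
        // Both forms turn Option<Result<T, E>> into Option<T>,
        // discarding the error case.
        let via_map_flatten = r.map(|x| x.ok()).flatten();
        let via_and_then = r.and_then(|x| x.ok());
        assert_eq!(via_map_flatten, via_and_then);
    }
}
```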
async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) -> Result<()> { - let peer_data = state.peer_data.entry(peer_id.clone()).or_default(); + let peer_data = state.peer_data.entry(peer_id).or_default(); peer_data.update_view(view); state @@ -883,7 +873,7 @@ async fn process_incoming_peer_message( "Declared as collator for unneeded para", ); - modify_reputation(ctx.sender(), origin.clone(), COST_UNNEEDED_COLLATOR).await; + modify_reputation(ctx.sender(), origin, COST_UNNEEDED_COLLATOR).await; gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator"); disconnect_peer(ctx.sender(), origin).await; } @@ -1013,7 +1003,7 @@ async fn handle_our_view_change( .span_per_head() .iter() .filter(|v| !old_view.contains(&v.0)) - .map(|v| (v.0.clone(), v.1.clone())) + .map(|v| (*v.0, v.1.clone())) .collect(); added.into_iter().for_each(|(h, s)| { @@ -1046,7 +1036,7 @@ async fn handle_our_view_change( ?para_id, "Disconnecting peer on view change (not current parachain id)" ); - disconnect_peer(ctx.sender(), peer_id.clone()).await; + disconnect_peer(ctx.sender(), *peer_id).await; } } } @@ -1254,7 +1244,7 @@ async fn poll_requests( retained_requested.insert(pending_collation.clone()); } if let CollationFetchResult::Error(Some(rep)) = result { - reputation_changes.push((pending_collation.peer_id.clone(), rep)); + reputation_changes.push((pending_collation.peer_id, rep)); } } requested_collations.retain(|k, _| retained_requested.contains(k)); @@ -1337,11 +1327,7 @@ async fn handle_collation_fetched_result( if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) { collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash); ctx.sender() - .send_message(CandidateBackingMessage::Second( - relay_parent.clone(), - candidate_receipt, - pov, - )) + .send_message(CandidateBackingMessage::Second(relay_parent, candidate_receipt, pov)) .await; entry.insert(collation_event); @@ -1366,7 +1352,7 @@ async fn disconnect_inactive_peers( for (peer, peer_data) in peers { if peer_data.is_inactive(&eviction_policy) { gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer"); - disconnect_peer(sender, peer.clone()).await; + disconnect_peer(sender, *peer).await; } } } diff --git a/node/network/dispute-distribution/Cargo.toml b/node/network/dispute-distribution/Cargo.toml index d70d41f6916f..38af6d3df7ba 100644 --- a/node/network/dispute-distribution/Cargo.toml +++ b/node/network/dispute-distribution/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-dispute-distribution" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index 9030fc0b3f96..b84be7b2dfde 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ b/node/network/dispute-distribution/src/receiver/mod.rs @@ -430,7 +430,7 @@ where ); return }, - Some(vote) => (vote.0.session_index(), vote.0.candidate_hash().clone()), + Some(vote) => (vote.0.session_index(), *vote.0.candidate_hash()), }; let (pending_confirmation, confirmation_rx) = oneshot::channel(); diff --git a/node/network/dispute-distribution/src/sender/mod.rs b/node/network/dispute-distribution/src/sender/mod.rs index b25561df5652..8cecc96c8dc7 100644 --- a/node/network/dispute-distribution/src/sender/mod.rs +++ b/node/network/dispute-distribution/src/sender/mod.rs @@ -304,7 +304,7 @@ impl DisputeSender { .get(*valid_index) 
.ok_or(JfyiError::InvalidStatementFromCoordinator)?; let valid_signed = SignedDisputeStatement::new_checked( - DisputeStatement::Valid(kind.clone()), + DisputeStatement::Valid(*kind), candidate_hash, session_index, valid_public.clone(), @@ -319,7 +319,7 @@ impl DisputeSender { .get(*invalid_index) .ok_or(JfyiError::InvalidValidatorIndexFromCoordinator)?; let invalid_signed = SignedDisputeStatement::new_checked( - DisputeStatement::Invalid(kind.clone()), + DisputeStatement::Invalid(*kind), candidate_hash, session_index, invalid_public.clone(), diff --git a/node/network/gossip-support/Cargo.toml b/node/network/gossip-support/Cargo.toml index fa99366c098d..5360efae09de 100644 --- a/node/network/gossip-support/Cargo.toml +++ b/node/network/gossip-support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-gossip-support" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index c3fa1faedb67..daa097886801 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-network-protocol" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Primitives types for the Node-side" diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 100ef66957bd..2ae43c07c355 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -94,7 +94,7 @@ impl SessionGridTopology { let n = &self.canonical_shuffling[r_n]; grid_subset.validator_indices_x.insert(n.validator_index); for p in &n.peer_ids { - grid_subset.peers_x.insert(p.clone()); + grid_subset.peers_x.insert(*p); } } @@ -102,7 +102,7 @@ impl SessionGridTopology { let n = &self.canonical_shuffling[c_n]; grid_subset.validator_indices_y.insert(n.validator_index); for p in &n.peer_ids { - grid_subset.peers_y.insert(p.clone()); + grid_subset.peers_y.insert(*p); } } diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 169d916ce6f9..744217133eed 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -207,7 +207,7 @@ impl View { } /// Obtain an iterator over all heads. - pub fn iter<'a>(&'a self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.heads.iter() } diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index d24537e219c7..6ce0c883cc6c 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -126,6 +126,17 @@ const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; pub const DISPUTE_REQUEST_TIMEOUT: Duration = Duration::from_secs(12); impl Protocol { + /// Get a configuration for a given Request response protocol. + /// + /// Returns a `ProtocolConfig` for this protocol. + /// Use this if you plan only to send requests for this protocol. + pub fn get_outbound_only_config( + self, + req_protocol_names: &ReqProtocolNames, + ) -> RequestResponseConfig { + self.create_config(req_protocol_names, None) + } + /// Get a configuration for a given Request response protocol. 
/// /// Returns a receiver for messages received on this protocol and the requested @@ -134,10 +145,19 @@ impl Protocol { self, req_protocol_names: &ReqProtocolNames, ) -> (mpsc::Receiver, RequestResponseConfig) { + let (tx, rx) = mpsc::channel(self.get_channel_size()); + let cfg = self.create_config(req_protocol_names, Some(tx)); + (rx, cfg) + } + + fn create_config( + self, + req_protocol_names: &ReqProtocolNames, + tx: Option>, + ) -> RequestResponseConfig { let name = req_protocol_names.get_name(self); let fallback_names = self.get_fallback_names(); - let (tx, rx) = mpsc::channel(self.get_channel_size()); - let cfg = match self { + match self { Protocol::ChunkFetchingV1 => RequestResponseConfig { name, fallback_names, @@ -145,7 +165,7 @@ impl Protocol { max_response_size: POV_RESPONSE_SIZE as u64 * 3, // We are connected to all validators: request_timeout: CHUNK_REQUEST_TIMEOUT, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::CollationFetchingV1 => RequestResponseConfig { name, @@ -154,7 +174,7 @@ impl Protocol { max_response_size: POV_RESPONSE_SIZE, // Taken from initial implementation in collator protocol: request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::PoVFetchingV1 => RequestResponseConfig { name, @@ -162,7 +182,7 @@ impl Protocol { max_request_size: 1_000, max_response_size: POV_RESPONSE_SIZE, request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::AvailableDataFetchingV1 => RequestResponseConfig { name, @@ -171,7 +191,7 @@ impl Protocol { // Available data size is dominated by the PoV size. max_response_size: POV_RESPONSE_SIZE, request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::StatementFetchingV1 => RequestResponseConfig { name, @@ -189,7 +209,7 @@ impl Protocol { // fail, but this is desired, so we can quickly move on to a faster one - we should // also decrease its reputation. request_timeout: Duration::from_secs(1), - inbound_queue: Some(tx), + inbound_queue: tx, }, Protocol::DisputeSendingV1 => RequestResponseConfig { name, @@ -199,10 +219,9 @@ impl Protocol { /// plenty. max_response_size: 100, request_timeout: DISPUTE_REQUEST_TIMEOUT, - inbound_queue: Some(tx), + inbound_queue: tx, }, - }; - (rx, cfg) + } } // Channel sizes for the supported protocols. 
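Editor's note on the `Protocol` refactor above: the configuration logic moves into a private `create_config` that takes an `Option` of an inbound sender, so callers that only ever send requests can obtain a config without allocating an inbound channel (`get_outbound_only_config`), while `get_config` keeps returning the receiver as before. A stripped-down sketch of that shape, with invented stand-ins for `RequestResponseConfig`, the protocol name string, and the channel size:

```rust
use futures::channel::mpsc;

// Simplified stand-in for the real network request-response configuration.
struct RequestResponseConfig {
    name: &'static str,
    inbound_queue: Option<mpsc::Sender<Vec<u8>>>,
}

#[derive(Clone, Copy)]
enum Protocol {
    ChunkFetchingV1,
}

impl Protocol {
    /// For nodes that only issue requests: no inbound channel is created.
    fn get_outbound_only_config(self) -> RequestResponseConfig {
        self.create_config(None)
    }

    /// For nodes that also serve requests: hand back the receiver.
    fn get_config(self) -> (mpsc::Receiver<Vec<u8>>, RequestResponseConfig) {
        let (tx, rx) = mpsc::channel(128);
        (rx, self.create_config(Some(tx)))
    }

    fn create_config(self, tx: Option<mpsc::Sender<Vec<u8>>>) -> RequestResponseConfig {
        match self {
            Protocol::ChunkFetchingV1 =>
                RequestResponseConfig { name: "/req_chunk/1", inbound_queue: tx },
        }
    }
}
```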
diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 1490ed01142a..7805cfeb0fda 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-statement-distribution" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Statement Distribution Subsystem" edition = "2021" diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 055fd4123f9a..271072ab1031 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -278,10 +278,10 @@ impl PeerRelayParentKnowledge { let new_known = match fingerprint.0 { CompactStatement::Seconded(ref h) => { - self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone()); + self.seconded_counts.entry(fingerprint.1).or_default().note_local(*h); let was_known = self.is_known_candidate(h); - self.sent_candidates.insert(h.clone()); + self.sent_candidates.insert(*h); !was_known }, CompactStatement::Valid(_) => false, @@ -345,7 +345,7 @@ impl PeerRelayParentKnowledge { .seconded_counts .entry(fingerprint.1) .or_insert_with(Default::default) - .note_remote(h.clone()); + .note_remote(*h); if !allowed_remote { return Err(COST_UNEXPECTED_STATEMENT_REMOTE) @@ -374,7 +374,7 @@ impl PeerRelayParentKnowledge { } self.received_statements.insert(fingerprint.clone()); - self.received_candidates.insert(candidate_hash.clone()); + self.received_candidates.insert(*candidate_hash); Ok(fresh) } @@ -1025,13 +1025,15 @@ async fn circulate_statement<'a, Context>( let mut peers_to_send: Vec = peers .iter() - .filter_map(|(peer, data)| { - if data.can_send(&relay_parent, &fingerprint) { - Some(peer.clone()) - } else { - None - } - }) + .filter_map( + |(peer, data)| { + if data.can_send(&relay_parent, &fingerprint) { + Some(*peer) + } else { + None + } + }, + ) .collect(); let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect(); @@ -1087,7 +1089,7 @@ async fn circulate_statement<'a, Context>( "Sending statement", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers_to_send.iter().map(|(p, _)| p.clone()).collect(), + peers_to_send.iter().map(|(p, _)| *p).collect(), payload, )) .await; @@ -1126,11 +1128,8 @@ async fn send_statements_about( statement = ?statement.statement, "Sending statement", ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - payload, - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload)) + .await; metrics.on_statement_distributed(); } @@ -1161,11 +1160,8 @@ async fn send_statements( statement = ?statement.statement, "Sending statement" ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - payload, - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload)) + .await; metrics.on_statement_distributed(); } @@ -1431,7 +1427,7 @@ async fn handle_incoming_message<'a, Context>( } let fingerprint = message.get_fingerprint(); - let candidate_hash = fingerprint.0.candidate_hash().clone(); + let candidate_hash = *fingerprint.0.candidate_hash(); let handle_incoming_span = active_head .span .child("handle-incoming") @@ -1551,7 +1547,7 @@ async fn handle_incoming_message<'a, Context>( // Send the peer all statements concerning the candidate that we have, // since it appears 
to have just learned about the candidate. send_statements_about( - peer.clone(), + peer, peer_data, ctx, relay_parent, @@ -1627,7 +1623,7 @@ async fn update_peer_view_and_maybe_send_unlocked( continue } if let Some(active_head) = active_heads.get(&new) { - send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await; + send_statements(peer, peer_data, ctx, new, active_head, metrics).await; } } } @@ -1710,7 +1706,7 @@ async fn handle_network_update( topology_storage, peers, active_heads, - &*recent_outdated_heads, + recent_outdated_heads, ctx, message, req_sender, diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 3aaa74e22da1..ddaf14c0af10 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-overseer" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/overseer/src/dummy.rs b/node/overseer/src/dummy.rs index 84ecdd1e8a89..0706244356aa 100644 --- a/node/overseer/src/dummy.rs +++ b/node/overseer/src/dummy.rs @@ -56,10 +56,10 @@ where /// Create an overseer with all subsystem being `Sub`. /// /// Preferred way of initializing a dummy overseer for subsystem tests. -pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>( +pub fn dummy_overseer_builder( spawner: Spawner, supports_parachains: SupportsParachains, - registry: Option<&'a Registry>, + registry: Option<&Registry>, ) -> Result< InitializedOverseerBuilder< SpawnGlue, @@ -97,11 +97,11 @@ where } /// Create an overseer with all subsystem being `Sub`. -pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>( +pub fn one_for_all_overseer_builder( spawner: Spawner, supports_parachains: SupportsParachains, subsystem: Sub, - registry: Option<&'a Registry>, + registry: Option<&Registry>, ) -> Result< InitializedOverseerBuilder< SpawnGlue, diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 70dbe92b2432..92baa12be79c 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -686,7 +686,7 @@ where subsystem_meters .iter() .cloned() - .filter_map(|x| x) + .flatten() .map(|(name, ref meters)| (name, meters.read())), ); @@ -861,7 +861,7 @@ where let mut span = jaeger::Span::new(*hash, "leaf-activated"); if let Some(parent_span) = parent_hash.and_then(|h| self.span_per_active_leaf.get(&h)) { - span.add_follows_from(&*parent_span); + span.add_follows_from(parent_span); } let span = Arc::new(span); diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index 3f0865b1e033..c7cfde987ce1 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-primitives" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Primitives types for the Node-side" diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index 1a943f8dcee6..c31ff1ecb283 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -170,13 +170,13 @@ impl DisputeMessage { let valid_vote = ValidDisputeVote { validator_index: valid_index, signature: valid_statement.validator_signature().clone(), - kind: valid_kind.clone(), + kind: *valid_kind, }; let invalid_vote = InvalidDisputeVote { validator_index: invalid_index, signature: invalid_statement.validator_signature().clone(), - kind: invalid_kind.clone(), + kind: *invalid_kind, }; Ok(DisputeMessage(UncheckedDisputeMessage { diff 
--git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index e75181b900e9..f9403ea6c186 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -383,7 +383,7 @@ impl std::fmt::Debug for CollationGenerationConfig { pub struct AvailableData { /// The Proof-of-Validation of the candidate. pub pov: std::sync::Arc, - /// The persisted validation data needed for secondary checks. + /// The persisted validation data needed for approval checks. pub validation_data: PersistedValidationData, } diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index e6e073546a13..d796f02aebb9 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-service" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" rust-version = "1.60" @@ -13,6 +13,8 @@ beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = " beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } +mmr-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master"} sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 3e535f3bd1f5..e80f085ef2bc 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -95,6 +95,7 @@ pub use polkadot_client::PolkadotExecutorDispatch; pub use chain_spec::{KusamaChainSpec, PolkadotChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{block_validation::Chain, Proposal, SelectChain}; +use mmr_gadget::MmrGadget; #[cfg(feature = "full-node")] pub use polkadot_client::{ AbstractClient, Client, ClientHandle, ExecuteWithClient, FullBackend, FullClient, @@ -758,6 +759,7 @@ where { use polkadot_node_network_protocol::request_response::IncomingRequest; + let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; let role = config.role.clone(); let force_authoring = config.force_authoring; let backoff_authoring_blocks = { @@ -1219,6 +1221,18 @@ where } else { task_manager.spawn_handle().spawn_blocking("beefy-gadget", None, gadget); } + + if is_offchain_indexing_enabled { + task_manager.spawn_handle().spawn_blocking( + "mmr-gadget", + None, + MmrGadget::start( + client.clone(), + backend.clone(), + sp_mmr_primitives::INDEXING_PREFIX.to_vec(), + ), + ); + } } let config = grandpa::Config { diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index a8ce3e5eaaf0..7dff86693827 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -129,7 +129,7 @@ where /// Obtain a prepared `OverseerBuilder`, that is initialized /// with all default values. 
-pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( +pub fn prepared_overseer_builder( OverseerGenArgs { leaves, keystore, @@ -155,7 +155,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - }: OverseerGenArgs<'a, Spawner, RuntimeClient>, + }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, @@ -257,7 +257,7 @@ where .collator_protocol({ let side = match is_collator { IsCollator::Yes(collator_pair) => ProtocolSide::Collator( - network_service.local_peer_id().clone(), + network_service.local_peer_id(), collator_pair, collation_req_receiver, Metrics::register(registry)?, @@ -334,10 +334,10 @@ where /// would do. pub trait OverseerGen { /// Overwrite the full generation of the overseer, including the subsystems. - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs, ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, @@ -358,10 +358,10 @@ use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE; pub struct RealOverseerGen; impl OverseerGen for RealOverseerGen { - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs, ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, diff --git a/node/service/src/relay_chain_selection.rs b/node/service/src/relay_chain_selection.rs index df3e68cc7b1a..890e4c16ec8f 100644 --- a/node/service/src/relay_chain_selection.rs +++ b/node/service/src/relay_chain_selection.rs @@ -343,12 +343,11 @@ where // The Chain Selection subsystem is supposed to treat the finalized // block as the best leaf in the case that there are no viable // leaves, so this should not happen in practice. - let best_leaf = self + let best_leaf = *self .leaves() .await? .first() - .ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))? 
- .clone(); + .ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))?; gum::trace!(target: LOG_TARGET, ?best_leaf, "Best chain"); diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index de4b0ae0a388..11c7e72b1015 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem-test-helpers" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Subsystem traits and message definitions" diff --git a/node/subsystem-types/Cargo.toml b/node/subsystem-types/Cargo.toml index acd66a067241..be88b217804c 100644 --- a/node/subsystem-types/Cargo.toml +++ b/node/subsystem-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem-types" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Subsystem traits and message definitions" diff --git a/node/subsystem-types/src/errors.rs b/node/subsystem-types/src/errors.rs index 27c4fcdf8d37..48829e7fc779 100644 --- a/node/subsystem-types/src/errors.rs +++ b/node/subsystem-types/src/errors.rs @@ -79,7 +79,12 @@ pub enum RecoveryError { impl std::fmt::Display for RecoveryError { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - write!(f, "{}", self) + let msg = match self { + RecoveryError::Invalid => "Invalid", + RecoveryError::Unavailable => "Unavailable", + }; + + write!(f, "{}", msg) } } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index cb7caebcaa23..94562ae6baef 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -541,9 +541,7 @@ pub enum AvailabilityStoreMessage { impl AvailabilityStoreMessage { /// In fact, none of the `AvailabilityStore` messages assume a particular relay parent. 
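Editor's note on the `RecoveryError` change above: it fixes a real bug. The old `Display` implementation wrote `self` with `{}`, which calls the very same `Display::fmt` and recurses until the stack overflows. Matching on the variants (or deriving the message, e.g. via `thiserror`) breaks the cycle. A minimal standalone version of the corrected pattern, using a local copy of the enum:

```rust
use std::fmt;

enum RecoveryError {
    Invalid,
    Unavailable,
}

impl fmt::Display for RecoveryError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Writing `write!(f, "{}", self)` here would re-enter this method
        // forever; spell the message out per variant instead.
        let msg = match self {
            RecoveryError::Invalid => "Invalid",
            RecoveryError::Unavailable => "Unavailable",
        };
        write!(f, "{}", msg)
    }
}

fn main() {
    assert_eq!(RecoveryError::Unavailable.to_string(), "Unavailable");
}
```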
pub fn relay_parent(&self) -> Option { - match self { - _ => None, - } + None } } diff --git a/node/subsystem-types/src/messages/network_bridge_event.rs b/node/subsystem-types/src/messages/network_bridge_event.rs index 5abad8a3c22c..06654153357a 100644 --- a/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/node/subsystem-types/src/messages/network_bridge_event.rs @@ -86,24 +86,19 @@ impl NetworkBridgeEvent { { Ok(match *self { NetworkBridgeEvent::PeerMessage(ref peer, ref msg) => - NetworkBridgeEvent::PeerMessage(peer.clone(), T::try_from(msg)?), + NetworkBridgeEvent::PeerMessage(*peer, T::try_from(msg)?), NetworkBridgeEvent::PeerConnected( ref peer, ref role, ref version, ref authority_id, - ) => NetworkBridgeEvent::PeerConnected( - peer.clone(), - role.clone(), - *version, - authority_id.clone(), - ), + ) => NetworkBridgeEvent::PeerConnected(*peer, *role, *version, authority_id.clone()), NetworkBridgeEvent::PeerDisconnected(ref peer) => - NetworkBridgeEvent::PeerDisconnected(peer.clone()), + NetworkBridgeEvent::PeerDisconnected(*peer), NetworkBridgeEvent::NewGossipTopology(ref topology) => NetworkBridgeEvent::NewGossipTopology(topology.clone()), NetworkBridgeEvent::PeerViewChange(ref peer, ref view) => - NetworkBridgeEvent::PeerViewChange(peer.clone(), view.clone()), + NetworkBridgeEvent::PeerViewChange(*peer, view.clone()), NetworkBridgeEvent::OurViewChange(ref view) => NetworkBridgeEvent::OurViewChange(view.clone()), }) diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index d390fd2b42cc..15a06aaddd4c 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem-util" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Subsystem traits and message definitions" diff --git a/node/subsystem-util/src/rolling_session_window.rs b/node/subsystem-util/src/rolling_session_window.rs index beac31292b7d..4ebfad405b5b 100644 --- a/node/subsystem-util/src/rolling_session_window.rs +++ b/node/subsystem-util/src/rolling_session_window.rs @@ -294,6 +294,11 @@ impl RollingSessionWindow { self.earliest_session + (self.session_info.len() as SessionIndex).saturating_sub(1) } + /// Returns `true` if `session_index` is contained in the window. 
+ pub fn contains(&self, session_index: SessionIndex) -> bool { + session_index >= self.earliest_session() && session_index <= self.latest_session() + } + async fn earliest_non_finalized_block_session( sender: &mut Sender, ) -> Result @@ -783,6 +788,21 @@ mod tests { cache_session_info_test(1, 2, Some(window), 2, None); } + #[test] + fn cache_session_window_contains() { + let window = RollingSessionWindow { + earliest_session: 10, + session_info: vec![dummy_session_info(1)], + window_size: SESSION_WINDOW_SIZE, + db_params: Some(dummy_db_params()), + }; + + assert!(!window.contains(0)); + assert!(!window.contains(10 + SESSION_WINDOW_SIZE.get())); + assert!(!window.contains(11)); + assert!(!window.contains(10 + SESSION_WINDOW_SIZE.get() - 1)); + } + #[test] fn cache_session_info_first_late() { cache_session_info_test( diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml index 1d783f748b45..67f6bd9559f7 100644 --- a/node/subsystem/Cargo.toml +++ b/node/subsystem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-node-subsystem" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" description = "Subsystem traits and message definitions and the generated overseer" diff --git a/node/test/client/Cargo.toml b/node/test/client/Cargo.toml index 712f2a902c2d..2e94093c072d 100644 --- a/node/test/client/Cargo.toml +++ b/node/test/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-test-client" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/test/performance-test/Cargo.toml b/node/test/performance-test/Cargo.toml index 55d74ad62f1d..cd07d1558e08 100644 --- a/node/test/performance-test/Cargo.toml +++ b/node/test/performance-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-performance-test" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index f767bb4ae975..3add5d2c14a3 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-test-service" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/node/zombienet-backchannel/Cargo.toml b/node/zombienet-backchannel/Cargo.toml index 74cd56ec521a..b34e94408f26 100644 --- a/node/zombienet-backchannel/Cargo.toml +++ b/node/zombienet-backchannel/Cargo.toml @@ -2,7 +2,7 @@ name = "zombienet-backchannel" description = "Zombienet backchannel to notify test runner and coordinate with malus actors." 
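Editor's note on the new `RollingSessionWindow::contains` test above: it can look surprising at first glance. With `earliest_session: 10` and a `session_info` vector holding a single entry, `latest_session()` evaluates to `10 + 1 - 1 = 10`, so only session 10 is inside the window and `contains(11)` is rightly `false`. The arithmetic, restated as a tiny standalone check (field and method names mirror the hunk, but the struct here is a simplified stand-in):

```rust
struct Window {
    earliest_session: u32,
    sessions_stored: u32, // stands in for session_info.len()
}

impl Window {
    fn latest_session(&self) -> u32 {
        self.earliest_session + self.sessions_stored.saturating_sub(1)
    }

    fn contains(&self, session_index: u32) -> bool {
        session_index >= self.earliest_session && session_index <= self.latest_session()
    }
}

fn main() {
    let w = Window { earliest_session: 10, sessions_stored: 1 };
    assert!(w.contains(10));
    assert!(!w.contains(0));
    assert!(!w.contains(11));
}
```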
license = "GPL-3.0-only" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" readme = "README.md" diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml index 665a7b0986cc..7b2954d45138 100644 --- a/parachain/Cargo.toml +++ b/parachain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-parachain" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Types and utilities for creating and working with parachains" edition = "2021" diff --git a/parachain/test-parachains/Cargo.toml b/parachain/test-parachains/Cargo.toml index 8ed8ae6211c4..7ad254f8eb46 100644 --- a/parachain/test-parachains/Cargo.toml +++ b/parachain/test-parachains/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachains" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Integration tests using the test-parachains" edition = "2021" diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index 42ffaf728ac5..6cc1bb3230f0 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-adder" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Test parachain which adds to a number as its state transition" edition = "2021" diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 132fe2c0a527..3359e7fa98f4 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-adder-collator" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Collator for the adder test parachain" edition = "2021" diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index 3a9096c4d56b..771207749f6c 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-halt" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Test parachain which executes forever" edition = "2021" diff --git a/parachain/test-parachains/undying/Cargo.toml b/parachain/test-parachains/undying/Cargo.toml index ab27c07781c5..aae021ddea83 100644 --- a/parachain/test-parachains/undying/Cargo.toml +++ b/parachain/test-parachains/undying/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-undying" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Test parachain for zombienet integration tests" edition = "2021" diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index f8198f8e4006..273d96524a27 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-undying-collator" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Collator for the undying test parachain" edition = "2021" diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index b99ac7bc19e6..7233fa5bd4a3 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-primitives" -version = "0.9.31" +version = "0.9.33" authors = ["Parity 
Technologies "] edition = "2021" diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index ea1f4fcca0b9..646c657e8adf 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -766,7 +766,7 @@ pub fn check_candidate_backing + Clone + Encode>( .zip(backed.validity_votes.iter()) { let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; - let payload = attestation.signed_payload(hash.clone(), signing_context); + let payload = attestation.signed_payload(hash, signing_context); let sig = attestation.signature(); if sig.verify(&payload[..], &validator_id) { diff --git a/primitives/test-helpers/Cargo.toml b/primitives/test-helpers/Cargo.toml index 3ea45e12df7f..401b5efaf5a1 100644 --- a/primitives/test-helpers/Cargo.toml +++ b/primitives/test-helpers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-primitives-test-helpers" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/roadmap/implementers-guide/src/glossary.md b/roadmap/implementers-guide/src/glossary.md index ed6a358be6da..d379c2813b59 100644 --- a/roadmap/implementers-guide/src/glossary.md +++ b/roadmap/implementers-guide/src/glossary.md @@ -2,6 +2,7 @@ Here you can find definitions of a bunch of jargon, usually specific to the Polkadot project. +- **Approval Checker:** A validator who randomly self-selects so to perform validity checks on a parablock which is pending approval. - **BABE:** (Blind Assignment for Blockchain Extension). The algorithm validators use to safely extend the Relay Chain. See [the Polkadot wiki][0] for more information. - **Backable Candidate:** A Parachain Candidate which is backed by a majority of validators assigned to a given parachain. - **Backed Candidate:** A Backable Candidate noted in a relay-chain block @@ -31,11 +32,9 @@ exactly one downward message queue. - **PVF Prechecking:** This is the process of initially checking the PVF when it is first added. We attempt preparation of the PVF and make sure it succeeds within a given timeout. - **PVF Preparation:** This is the process of preparing the WASM blob and includes both prevalidation and compilation. As prevalidation is pretty minimal right now, preparation mostly consists of compilation. - **Relay Parent:** A block in the relay chain, referred to in a context where work is being done in the context of the state at this block. -- **Router:** The router module is a meta module that consists of three runtime modules responsible for routing messages between paras and the relay chain. The three separate runtime modules are: Dmp, Ump, Hrmp, each responsible for the respective part of message routing. - **Runtime:** The relay-chain state machine. - **Runtime Module:** See Module. - **Runtime API:** A means for the node-side behavior to access structured information based on the state of a fork of the blockchain. -- **Secondary Checker:** A validator who has been randomly selected to perform secondary approval checks on a parablock which is pending approval. - **Subsystem:** A long-running task which is responsible for carrying out a particular category of work. - **UMP:** (Upward Message Passing) A vertical message passing mechanism from a parachain to the relay chain. - **Validator:** Specially-selected node in the network who is responsible for validating parachain blocks and issuing attestations about their validity. 
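Several hunks in this diff, starting with the `signed_payload(hash, signing_context)` change above, drop `.clone()` calls on values that are `Copy` (candidate hashes, core indices, state roots). This is the pattern clippy's `clone_on_copy` lint reports, and it lines up with the `cargo-clippy` CI job added later in this diff. The sketch below uses a hypothetical `Copy` newtype and a stub `signed_payload` rather than the real primitives:

```rust
// Sketch only: a hypothetical Copy newtype standing in for the real hash type.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct CandidateHash([u8; 4]);

// A function taking the hash by value, like `signed_payload` above (stubbed body).
fn signed_payload(hash: CandidateHash, signing_context: u32) -> Vec<u8> {
    let mut out = hash.0.to_vec();
    out.extend_from_slice(&signing_context.to_le_bytes());
    out
}

fn main() {
    let hash = CandidateHash([1, 2, 3, 4]);

    // Before: `signed_payload(hash.clone(), 7)` compiles, but the clone is
    // redundant because `CandidateHash` is `Copy`; passing by value copies it.
    let payload = signed_payload(hash, 7);

    // `hash` is still usable afterwards precisely because it was copied, not moved.
    assert_eq!(payload.len(), 8);
    assert_eq!(hash, CandidateHash([1, 2, 3, 4]));
}
```

The same reasoning applies to the `*parent_header.state_root()` and `*pending_availability.backed_in_number()` rewrites further down in this diff.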
diff --git a/roadmap/implementers-guide/src/node/availability/README.md b/roadmap/implementers-guide/src/node/availability/README.md index 46ee4b204982..76bd6467e178 100644 --- a/roadmap/implementers-guide/src/node/availability/README.md +++ b/roadmap/implementers-guide/src/node/availability/README.md @@ -1,3 +1,3 @@ # Availability Subsystems -The availability subsystems are responsible for ensuring that Proofs of Validity of backed candidates are widely available within the validator set, without requiring every node to retain a full copy. They accomplish this by broadly distributing erasure-coded chunks of the PoV, keeping track of which validator has which chunk by means of signed bitfields. They are also responsible for reassembling a complete PoV when required, e.g. when a fisherman reports a potentially invalid block. +The availability subsystems are responsible for ensuring that Proofs of Validity of backed candidates are widely available within the validator set, without requiring every node to retain a full copy. They accomplish this by broadly distributing erasure-coded chunks of the PoV, keeping track of which validator has which chunk by means of signed bitfields. They are also responsible for reassembling a complete PoV when required, e.g. when an approval checker needs to validate a parachain block. diff --git a/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/roadmap/implementers-guide/src/node/utility/candidate-validation.md index 4e67be069155..6e7a5f3d0c8f 100644 --- a/roadmap/implementers-guide/src/node/utility/candidate-validation.md +++ b/roadmap/implementers-guide/src/node/utility/candidate-validation.md @@ -77,10 +77,18 @@ time they can take. As the time for a job can vary depending on the machine and load on the machine, this can potentially lead to disputes where some validators successfuly execute a PVF and others don't. -One mitigation we have in place is a more lenient timeout for preparation during -execution than during pre-checking. The rationale is that the PVF has already -passed pre-checking, so we know it should be valid, and we allow it to take -longer than expected, as this is likely due to an issue with the machine and not -the PVF. +One dispute mitigation we have in place is a more lenient timeout for +preparation during execution than during pre-checking. The rationale is that the +PVF has already passed pre-checking, so we know it should be valid, and we allow +it to take longer than expected, as this is likely due to an issue with the +machine and not the PVF. + +#### CPU clock timeouts + +Another timeout-related mitigation we employ is to measure the time taken by +jobs using CPU time, rather than wall clock time. This is because the CPU time +of a process is less variable under different system conditions. When the +overall system is under heavy load, the wall clock time of a job is affected +more than the CPU time. [CVM]: ../../types/overseer-protocol.md#validationrequesttype diff --git a/roadmap/implementers-guide/src/protocol-overview.md b/roadmap/implementers-guide/src/protocol-overview.md index 77b3a7448c44..fa5a866e6121 100644 --- a/roadmap/implementers-guide/src/protocol-overview.md +++ b/roadmap/implementers-guide/src/protocol-overview.md @@ -8,7 +8,6 @@ First, it's important to go over the main actors we have involved in this protoc 1. Validators. These nodes are responsible for validating proposed parachain blocks. 
They do so by checking a Proof-of-Validity (PoV) of the block and ensuring that the PoV remains available. They put financial capital down as "skin in the game" which can be slashed (destroyed) if they are proven to have misvalidated. 1. Collators. These nodes are responsible for creating the Proofs-of-Validity that validators know how to check. Creating a PoV typically requires familiarity with the transaction format and block authoring rules of the parachain, as well as having access to the full state of the parachain. -1. Fishermen. These are user-operated, permissionless nodes whose goal is to catch misbehaving validators in exchange for a bounty. Collators and validators can behave as Fishermen too. Fishermen aren't necessary for security, and aren't covered in-depth by this document. This implies a simple pipeline where collators send validators parachain blocks and their requisite PoV to check. Then, validators validate the block using the PoV, signing statements which describe either the positive or negative outcome, and with enough positive statements, the block can be noted on the relay-chain. Negative statements are not a veto but will lead to a dispute, with those on the wrong side being slashed. If another validator later detects that a validator or group of validators incorrectly signed a statement claiming a block was valid, then those validators will be _slashed_, with the checker receiving a bounty. diff --git a/roadmap/implementers-guide/src/runtime/README.md b/roadmap/implementers-guide/src/runtime/README.md index 178346e184f5..f1f9d6c950e2 100644 --- a/roadmap/implementers-guide/src/runtime/README.md +++ b/roadmap/implementers-guide/src/runtime/README.md @@ -14,16 +14,18 @@ There is some functionality of the relay chain relating to parachains that we al We will split the logic of the runtime up into these modules: -* Initializer: manage initialization order of the other modules. +* Initializer: manages initialization order of the other modules. * Shared: manages shared storage and configurations for other modules. -* Configuration: manage configuration and configuration updates in a non-racy manner. -* Paras: manage chain-head and validation code for parachains and parathreads. +* Configuration: manages configuration and configuration updates in a non-racy manner. +* Paras: manages chain-head and validation code for parachains and parathreads. * Scheduler: manages parachain and parathread scheduling as well as validator assignments. * Inclusion: handles the inclusion and availability of scheduled parachains and parathreads. -* Validity: handles secondary checks and dispute resolution for included, available parablocks. +* SessionInfo: manages various session keys of validators and other params stored per session. +* Disputes: handles dispute resolution for included, available parablocks. +* Slashing: handles slashing logic for concluded disputes. * HRMP: handles horizontal messages between paras. -* UMP: Handles upward messages from a para to the relay chain. -* DMP: Handles downward messages from the relay chain to the para. +* UMP: handles upward messages from a para to the relay chain. +* DMP: handles downward messages from the relay chain to the para. The [Initializer module](initializer.md) is special - it's responsible for handling the initialization logic of the other modules to ensure that the correct initialization order and related invariants are maintained. 
The other modules won't specify a on-initialize logic, but will instead expose a special semi-private routine that the initialization module will call. The other modules are relatively straightforward and perform the roles described above. diff --git a/roadmap/implementers-guide/src/types/approval.md b/roadmap/implementers-guide/src/types/approval.md index e85a625b0710..b58e0a8187e1 100644 --- a/roadmap/implementers-guide/src/types/approval.md +++ b/roadmap/implementers-guide/src/types/approval.md @@ -6,7 +6,7 @@ The public key of a keypair used by a validator for determining assignments to a ## `AssignmentCert` -An `AssignmentCert`, short for Assignment Certificate, is a piece of data provided by a validator to prove that they have been selected to perform secondary approval checks on an included candidate. +An `AssignmentCert`, short for Assignment Certificate, is a piece of data provided by a validator to prove that they have been selected to perform approval checks on an included candidate. These certificates can be checked in the context of a specific block, candidate, and validator assignment VRF key. The block state will also provide further context about the availability core states at that block. diff --git a/roadmap/implementers-guide/src/types/candidate.md b/roadmap/implementers-guide/src/types/candidate.md index baad5b07e6cd..729c72180ee5 100644 --- a/roadmap/implementers-guide/src/types/candidate.md +++ b/roadmap/implementers-guide/src/types/candidate.md @@ -76,7 +76,7 @@ struct CandidateDescriptor { collator: CollatorId, /// The blake2-256 hash of the persisted validation data. These are extra parameters /// derived from relay-chain state that influence the validity of the block which - /// must also be kept available for secondary checkers. + /// must also be kept available for approval checkers. persisted_validation_data_hash: Hash, /// The blake2-256 hash of the `pov-block`. pov_hash: Hash, @@ -116,7 +116,7 @@ Since this data is used to form inputs to the validation function, it needs to b Furthermore, the validation data acts as a way to authorize the additional data the collator needs to pass to the validation function. For example, the validation function can check whether the incoming messages (e.g. downward messages) were actually sent by using the data provided in the validation data using so called MQC heads. -Since the commitments of the validation function are checked by the relay-chain, secondary checkers can rely on the invariant that the relay-chain only includes para-blocks for which these checks have already been done. As such, there is no need for the validation data used to inform validators and collators about the checks the relay-chain will perform to be persisted by the availability system. +Since the commitments of the validation function are checked by the relay-chain, approval checkers can rely on the invariant that the relay-chain only includes para-blocks for which these checks have already been done. As such, there is no need for the validation data used to inform validators and collators about the checks the relay-chain will perform to be persisted by the availability system. The `PersistedValidationData` should be relatively lightweight primarily because it is constructed during inclusion for each candidate and therefore lies on the critical path of inclusion. 
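The CPU-clock timeout mitigation described in the candidate-validation section above can be illustrated with a small sketch. It assumes the `cpu-time` crate and its `ProcessTime::now()` / `elapsed()` API; it is not the actual PVF worker code, only a demonstration of why a CPU-clock budget is steadier than a wall-clock one when the machine is under load:

```rust
// Sketch only: compare wall-clock vs CPU-clock measurement of a busy job.
use cpu_time::ProcessTime;
use std::time::{Duration, Instant};

fn busy_work(iters: u64) -> u64 {
    (0..iters).fold(0u64, |acc, x| acc.wrapping_add(x.wrapping_mul(31)))
}

fn main() {
    let cpu_budget = Duration::from_secs(2);

    let wall_start = Instant::now();
    let cpu_start = ProcessTime::now();

    let sum = busy_work(50_000_000);
    println!("sum: {sum}");

    // Wall time also counts time spent descheduled while the system is busy;
    // CPU time only counts time this process actually spent executing.
    println!("wall: {:?}", wall_start.elapsed());
    println!("cpu:  {:?}", cpu_start.elapsed());

    // A CPU-clock based budget check, in the spirit of the mitigation above.
    if cpu_start.elapsed() > cpu_budget {
        eprintln!("job exceeded its CPU-time budget");
    }
}
```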
diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index ad66d0132788..41d624670363 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -825,7 +825,7 @@ pub enum CandidateValidationMessage { /// /// This request doesn't involve acceptance criteria checking, therefore only useful for the /// cases where the validity of the candidate is established. This is the case for the typical - /// use-case: secondary checkers would use this request relying on the full prior checks + /// use-case: approval checkers would use this request relying on the full prior checks /// performed by the relay-chain. ValidateFromExhaustive( PersistedValidationData, diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 401eec5a30d9..cb5d8e59b24e 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-rpc" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" @@ -24,7 +24,7 @@ sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", bra sc-sync-state-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } txpool-api = { package = "sc-transaction-pool-api", git = "https://github.com/paritytech/substrate", branch = "master" } frame-rpc-system = { package = "substrate-frame-rpc-system", git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 2b3497832caa..43efefcae15b 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -108,11 +108,7 @@ where + Sync + 'static, C::Api: frame_rpc_system::AccountNonceApi, - C::Api: pallet_mmr_rpc::MmrRuntimeApi< - Block, - ::Hash, - BlockNumber, - >, + C::Api: mmr_rpc::MmrRuntimeApi::Hash, BlockNumber>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, @@ -123,7 +119,7 @@ where { use beefy_gadget_rpc::{Beefy, BeefyApiServer}; use frame_rpc_system::{System, SystemApiServer}; - use pallet_mmr_rpc::{Mmr, MmrApiServer}; + use mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; use sc_finality_grandpa_rpc::{Grandpa, GrandpaApiServer}; diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 96f29eae9257..d7664ba2d078 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-common" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/common/slot_range_helper/Cargo.toml b/runtime/common/slot_range_helper/Cargo.toml index 0cd436955827..1957f35551c5 100644 --- a/runtime/common/slot_range_helper/Cargo.toml +++ b/runtime/common/slot_range_helper/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "slot-range-helper" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git 
a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index bb0663ec34f7..1bb5b0cdc8d3 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -247,12 +247,9 @@ pub mod pallet { impl GenesisBuild for GenesisConfig { fn build(&self) { // build `Claims` - self.claims - .iter() - .map(|(a, b, _, _)| (a.clone(), b.clone())) - .for_each(|(a, b)| { - Claims::::insert(a, b); - }); + self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| { + Claims::::insert(a, b); + }); // build `Total` Total::::put( self.claims @@ -266,17 +263,16 @@ pub mod pallet { // build `Signing` self.claims .iter() - .filter_map(|(a, _, _, s)| Some((a.clone(), s.clone()?))) + .filter_map(|(a, _, _, s)| Some((*a, (*s)?))) .for_each(|(a, s)| { Signing::::insert(a, s); }); // build `Preclaims` - self.claims - .iter() - .filter_map(|(a, _, i, _)| Some((i.clone()?, a.clone()))) - .for_each(|(i, a)| { + self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each( + |(i, a)| { Preclaims::::insert(i, a); - }); + }, + ); } } @@ -538,7 +534,7 @@ impl Pallet { } let mut v = b"\x19Ethereum Signed Message:\n".to_vec(); v.extend(rev.into_iter().rev()); - v.extend_from_slice(&prefix[..]); + v.extend_from_slice(prefix); v.extend_from_slice(what); v.extend_from_slice(extra); v @@ -645,7 +641,7 @@ where info: &DispatchInfoOf, len: usize, ) -> Result { - Ok(self.validate(who, call, info, len).map(|_| ())?) + self.validate(who, call, info, len).map(|_| ()) } // diff --git a/runtime/common/src/crowdloan/migration.rs b/runtime/common/src/crowdloan/migration.rs index 775d70f92458..1ba1f20e8060 100644 --- a/runtime/common/src/crowdloan/migration.rs +++ b/runtime/common/src/crowdloan/migration.rs @@ -67,12 +67,10 @@ pub mod crowdloan_index_migration { let leases = Leases::::get(para_id).unwrap_or_default(); let mut found_lease_deposit = false; - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - found_lease_deposit = true; - break - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + found_lease_deposit = true; + break } } if found_lease_deposit { @@ -112,11 +110,9 @@ pub mod crowdloan_index_migration { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)); let mut leases = Leases::::get(para_id).unwrap_or_default(); - for maybe_deposit in leases.iter_mut() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - *who = new_fund_account.clone(); - } + for (who, _amount) in leases.iter_mut().flatten() { + if *who == old_fund_account { + *who = new_fund_account.clone(); } } @@ -162,13 +158,11 @@ pub mod crowdloan_index_migration { let leases = Leases::::get(para_id).unwrap_or_default(); let mut new_account_found = false; - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - panic!("Old fund account found after migration!"); - } else if *who == new_fund_account { - new_account_found = true; - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + panic!("Old fund account found after migration!"); + } else if *who == new_fund_account { + new_account_found = true; } } if new_account_found { diff --git a/runtime/common/src/slots/migration.rs b/runtime/common/src/slots/migration.rs index 33d221b209d5..a87f1cd7a074 100644 --- a/runtime/common/src/slots/migration.rs +++ b/runtime/common/src/slots/migration.rs @@ -31,18 +31,16 @@ pub mod 
slots_crowdloan_index_migration { for (para_id, leases) in Leases::::iter() { let old_fund_account = old_fund_account_id::(para_id); - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - let crowdloan = - crowdloan::Funds::::get(para_id).ok_or("no crowdloan found")?; - log::info!( - target: "runtime", - "para_id={:?}, old_fund_account={:?}, fund_id={:?}, leases={:?}", - para_id, old_fund_account, crowdloan.fund_index, leases, - ); - break - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + let crowdloan = + crowdloan::Funds::::get(para_id).ok_or("no crowdloan found")?; + log::info!( + target: "runtime", + "para_id={:?}, old_fund_account={:?}, fund_id={:?}, leases={:?}", + para_id, old_fund_account, crowdloan.fund_index, leases, + ); + break } } } @@ -61,11 +59,9 @@ pub mod slots_crowdloan_index_migration { let new_fund_account = crowdloan::Pallet::::fund_account_id(fund.fund_index); // look for places the old account is used, and replace with the new account. - for maybe_deposit in leases.iter_mut() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - *who = new_fund_account.clone(); - } + for (who, _amount) in leases.iter_mut().flatten() { + if *who == old_fund_account { + *who = new_fund_account.clone(); } } @@ -83,11 +79,9 @@ pub mod slots_crowdloan_index_migration { let old_fund_account = old_fund_account_id::(para_id); log::info!(target: "runtime", "checking para_id: {:?}", para_id); // check the old fund account doesn't exist anywhere. - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - panic!("old fund account found after migration!"); - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + panic!("old fund account found after migration!"); } } } diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index ec3de40ec5e6..969f85e7e721 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kusama-runtime" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" build = "build.rs" diff --git a/runtime/kusama/constants/Cargo.toml b/runtime/kusama/constants/Cargo.toml index 6f85387f743d..caaf4e3af577 100644 --- a/runtime/kusama/constants/Cargo.toml +++ b/runtime/kusama/constants/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kusama-runtime-constants" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 7ef451e52ea9..4accce8a860a 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -124,13 +124,13 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("kusama"), impl_name: create_runtime_str!("parity-kusama"), authoring_version: 2, - spec_version: 9310, + spec_version: 9330, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, #[cfg(feature = "disable-runtime-api")] apis: sp_version::create_apis_vec![[]], - transaction_version: 15, + transaction_version: 16, state_version: 0, }; @@ -1005,6 +1005,11 @@ impl InstanceFilter for ProxyType { RuntimeCall::Bounties(..) | RuntimeCall::ChildBounties(..) | RuntimeCall::Tips(..) | + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::FellowshipCollective(..) | + RuntimeCall::FellowshipReferenda(..) 
| + RuntimeCall::Whitelist(..) | RuntimeCall::Claims(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | @@ -1034,17 +1039,22 @@ impl InstanceFilter for ProxyType { RuntimeCall::NominationPools(..) | RuntimeCall::FastUnstake(..) ), - ProxyType::Governance => - matches!( - c, - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::PhragmenElection(..) | - RuntimeCall::Treasury(..) | - RuntimeCall::Bounties(..) | - RuntimeCall::Tips(..) | RuntimeCall::Utility(..) | - RuntimeCall::ChildBounties(..) - ), + ProxyType::Governance => matches!( + c, + RuntimeCall::Democracy(..) | + RuntimeCall::Council(..) | RuntimeCall::TechnicalCommittee(..) | + RuntimeCall::PhragmenElection(..) | + RuntimeCall::Treasury(..) | + RuntimeCall::Bounties(..) | + RuntimeCall::Tips(..) | RuntimeCall::Utility(..) | + RuntimeCall::ChildBounties(..) | + // OpenGov calls + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::FellowshipCollective(..) | + RuntimeCall::FellowshipReferenda(..) | + RuntimeCall::Whitelist(..) + ), ProxyType::Staking => { matches!( c, @@ -1453,17 +1463,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - ( - // "Bound uses of call" - pallet_preimage::migration::v1::Migration, - pallet_scheduler::migration::v3::MigrateToV4, - pallet_democracy::migrations::v1::Migration, - pallet_multisig::migrations::v1::MigrateToV1, - // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, - pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, - pallet_fast_unstake::migrations::v1::MigrateToV1, - ), + (), >; /// The payload being signed in the transactions. pub type SignedPayload = generic::SignedPayload; @@ -1704,6 +1704,10 @@ sp_api::impl_runtime_apis! { Err(mmr::Error::PalletNotIncluded) } + fn mmr_leaf_count() -> Result { + Err(mmr::Error::PalletNotIncluded) + } + fn generate_proof( _block_numbers: Vec, _best_known_block_number: Option, diff --git a/runtime/metrics/Cargo.toml b/runtime/metrics/Cargo.toml index efa7569d1130..73c0b751f01b 100644 --- a/runtime/metrics/Cargo.toml +++ b/runtime/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-metrics" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index cb8b989ccdda..1d055c7630f2 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-parachains" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 438a71ca722a..2e6c6db77b56 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -1209,7 +1209,7 @@ impl Pallet { // it's sufficient to count the votes in the statement set after they set.statements.iter().for_each(|(statement, v_i, _signature)| { if Some(true) == - summary.new_participants.get(v_i.0 as usize).map(|b| b.as_ref().clone()) + summary.new_participants.get(v_i.0 as usize).map(|b| *b.as_ref()) { match statement { // `summary.new_flags` contains the spam free votes. 
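The crowdloan and slots migration hunks above repeatedly replace a loop of `if let Some((who, _amount)) = maybe_deposit` over a `Vec<Option<_>>` with `iter().flatten()`, which yields only the `Some` entries. A minimal sketch of the equivalence, using hypothetical lease types rather than the real storage items:

```rust
// Sketch only: hypothetical stand-ins for the lease storage value type.
type AccountId = u64;
type Balance = u128;

fn main() {
    let leases: Vec<Option<(AccountId, Balance)>> =
        vec![Some((1, 100)), None, Some((2, 250)), None];
    let old_fund_account: AccountId = 2;

    // Before:
    //   for maybe_deposit in leases.iter() {
    //       if let Some((who, _amount)) = maybe_deposit { ... }
    //   }
    // After: `flatten()` skips the `None`s and yields the inner tuples directly.
    let mut found = false;
    for (who, _amount) in leases.iter().flatten() {
        if *who == old_fund_account {
            found = true;
            break;
        }
    }
    assert!(found);
}
```

Behaviour is unchanged: `flatten()` over an iterator of `&Option<T>` simply skips the `None`s, so the early `break` and the account comparison work exactly as before.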
diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 53ad6781048f..c0624cdcacfd 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -751,10 +751,10 @@ impl Pallet { let ingress = ::HrmpIngressChannelsIndex::take(outgoing_para) .into_iter() - .map(|sender| HrmpChannelId { sender, recipient: outgoing_para.clone() }); + .map(|sender| HrmpChannelId { sender, recipient: *outgoing_para }); let egress = ::HrmpEgressChannelsIndex::take(outgoing_para) .into_iter() - .map(|recipient| HrmpChannelId { sender: outgoing_para.clone(), recipient }); + .map(|recipient| HrmpChannelId { sender: *outgoing_para, recipient }); let mut to_close = ingress.chain(egress).collect::>(); to_close.sort(); to_close.dedup(); @@ -1075,7 +1075,7 @@ impl Pallet { channel.total_size += inbound.data.len() as u32; // compute the new MQC head of the channel - let prev_head = channel.mqc_head.clone().unwrap_or(Default::default()); + let prev_head = channel.mqc_head.unwrap_or(Default::default()); let new_head = BlakeTwo256::hash_of(&( prev_head, inbound.sent_at, diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index f74a8cfd3f8d..1df6c141e9df 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -102,7 +102,7 @@ impl CandidatePendingAvailability { /// Get the core index. pub(crate) fn core_occupied(&self) -> CoreIndex { - self.core.clone() + self.core } /// Get the candidate hash. @@ -383,7 +383,7 @@ impl Pallet { let mut freed_cores = Vec::with_capacity(expected_bits); for (para_id, pending_availability) in assigned_paras_record .into_iter() - .filter_map(|x| x) + .flatten() .filter_map(|(id, p)| p.map(|p| (id, p))) { if pending_availability.availability_votes.count_ones() >= threshold { @@ -644,8 +644,7 @@ impl Pallet { }; // one more sweep for actually writing to storage. - let core_indices = - core_indices_and_backers.iter().map(|&(ref c, _, _)| c.clone()).collect(); + let core_indices = core_indices_and_backers.iter().map(|&(ref c, _, _)| *c).collect(); for (candidate, (core, backers, group)) in candidates.into_iter().zip(core_indices_and_backers) { diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index eaa4510fafcf..ef00e5b884cc 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -247,7 +247,7 @@ impl Pallet { let validators = shared::Pallet::::initializer_on_new_session( session_index, - random_seed.clone(), + random_seed, &new_config, all_validators, ); diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 3c5744b96546..c4fadcf5642b 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -659,7 +659,7 @@ pub mod pallet { /// Past code of parachains. The parachains themselves may not be registered anymore, /// but we also keep their code on-chain for the same amount of time as outdated code - /// to keep it available for secondary checkers. + /// to keep it available for approval checkers. 
#[pallet::storage] #[pallet::getter(fn past_code_meta)] pub(super) type PastCodeMeta = diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 188a8f677979..a053e3dbfaf9 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -513,7 +513,7 @@ impl Pallet { METRICS.on_candidates_sanitized(backed_candidates.len() as u64); // Process backed candidates according to scheduled cores. - let parent_storage_root = parent_header.state_root().clone(); + let parent_storage_root = *parent_header.state_root(); let inclusion::ProcessedCandidates::<::Hash> { core_indices: occupied, candidate_receipt_with_backing_validator_indices, @@ -711,7 +711,7 @@ impl Pallet { let scheduled = >::scheduled(); let relay_parent_number = now - One::one(); - let parent_storage_root = parent_header.state_root().clone(); + let parent_storage_root = *parent_header.state_root(); let check_ctx = CandidateCheckContext::::new(now, relay_parent_number); let backed_candidates = sanitize_backed_candidates::( @@ -1201,7 +1201,7 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { // known 2 epochs ago. it is marginally better than using the parent block // hash since it's harder to influence the VRF output than the block hash. let vrf_random = ParentBlockRandomness::::random(&CANDIDATE_SEED_SUBJECT[..]).0; - let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT.clone(); + let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT; if let Some(vrf_random) = vrf_random { entropy.as_mut().copy_from_slice(vrf_random.as_ref()); } else { diff --git a/runtime/parachains/src/runtime_api_impl/v2.rs b/runtime/parachains/src/runtime_api_impl/v2.rs index 77ea96742b54..57345a819de0 100644 --- a/runtime/parachains/src/runtime_api_impl/v2.rs +++ b/runtime/parachains/src/runtime_api_impl/v2.rs @@ -107,7 +107,7 @@ pub fn availability_cores() -> Vec>::pending_availability(para_id) .expect("Occupied core always has pending availability; qed"); - let backed_in_number = pending_availability.backed_in_number().clone(); + let backed_in_number = *pending_availability.backed_in_number(); OccupiedCore { next_up_on_available: >::next_up_on_available( CoreIndex(i as u32), @@ -135,7 +135,7 @@ pub fn availability_cores() -> Vec>::pending_availability(para_id) .expect("Occupied core always has pending availability; qed"); - let backed_in_number = pending_availability.backed_in_number().clone(); + let backed_in_number = *pending_availability.backed_in_number(); OccupiedCore { next_up_on_available: >::next_up_on_available( CoreIndex(i as u32), diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 0185817b2aa1..6eb1b732705f 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -483,7 +483,7 @@ impl Pallet { Some(CoreAssignment { kind: AssignmentKind::Parachain, para_id: parachains[core_index], - core: core.clone(), + core, group_idx: Self::group_assigned_to_core(core, now).expect( "core is not out of bounds and we are guaranteed \ to be after the most recent session start; qed", @@ -496,7 +496,7 @@ impl Pallet { parathread_queue.take_next_on_core(core_offset).map(|entry| CoreAssignment { kind: AssignmentKind::Parathread(entry.claim.1, entry.retries), para_id: entry.claim.0, - core: core.clone(), + core, group_idx: Self::group_assigned_to_core(core, now).expect( "core is not out of bounds and we are guaranteed \ to be after the most recent session start; qed", @@ -610,11 +610,9 @@ impl 
Pallet { (at - session_start_block) / config.group_rotation_frequency.into(); let rotations_since_session_start = - match >::try_into(rotations_since_session_start) { - Ok(i) => i, - Err(_) => 0, // can only happen if rotations occur only once every u32::max(), - // so functionally no difference in behavior. - }; + >::try_into(rotations_since_session_start).unwrap_or(0); + // Error case can only happen if rotations occur only once every u32::max(), + // so functionally no difference in behavior. let group_idx = (core.0 as usize + rotations_since_session_start as usize) % validator_groups.len(); diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs index 5aa7b17d923c..8d734acb3464 100644 --- a/runtime/parachains/src/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -107,7 +107,7 @@ impl, C: Config> UmpSink VersionedXcm, }; - let id = upward_message_id(&data[..]); + let id = upward_message_id(data); let maybe_msg_and_weight = VersionedXcm::::decode_all_with_depth_limit( xcm::MAX_XCM_DECODE_DEPTH, &mut data, diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index 9809fb8ff843..c621fa876573 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" build = "build.rs" diff --git a/runtime/polkadot/constants/Cargo.toml b/runtime/polkadot/constants/Cargo.toml index 953ff98c2d5b..6b930c0e4196 100644 --- a/runtime/polkadot/constants/Cargo.toml +++ b/runtime/polkadot/constants/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-runtime-constants" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 25b142a13be1..4e15d4feb9c4 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -114,13 +114,13 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("polkadot"), impl_name: create_runtime_str!("parity-polkadot"), authoring_version: 0, - spec_version: 9310, + spec_version: 9330, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, #[cfg(feature = "disable-runtime-api")] apis: sp_version::create_apis_vec![[]], - transaction_version: 16, + transaction_version: 17, state_version: 0, }; @@ -1609,17 +1609,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - ( - // "Bound uses of call" - pallet_preimage::migration::v1::Migration, - pallet_scheduler::migration::v3::MigrateToV4, - pallet_democracy::migrations::v1::Migration, - pallet_multisig::migrations::v1::MigrateToV1, - // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, - pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, - pallet_fast_unstake::migrations::v1::MigrateToV1, - ), + (), >; /// The payload being signed in transactions. @@ -1861,6 +1851,10 @@ sp_api::impl_runtime_apis! { Err(mmr::Error::PalletNotIncluded) } + fn mmr_leaf_count() -> Result { + Err(mmr::Error::PalletNotIncluded) + } + fn generate_proof( _block_numbers: Vec, _best_known_block_number: Option, diff --git a/runtime/polkadot/src/xcm_config.rs b/runtime/polkadot/src/xcm_config.rs index 846d98a59d99..39f7a337479a 100644 --- a/runtime/polkadot/src/xcm_config.rs +++ b/runtime/polkadot/src/xcm_config.rs @@ -107,10 +107,12 @@ pub type XcmRouter = ( parameter_types! 
{ pub const Polkadot: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(DotLocation::get()) }); pub const PolkadotForStatemint: (MultiAssetFilter, MultiLocation) = (Polkadot::get(), Parachain(1000).into()); + pub const PolkadotForCollectives: (MultiAssetFilter, MultiLocation) = (Polkadot::get(), Parachain(1001).into()); } -/// Polkadot Relay recognizes/respects the Statemint chain as a teleporter. -pub type TrustedTeleporters = (xcm_builder::Case,); +/// Polkadot Relay recognizes/respects System parachains as teleporters. +pub type TrustedTeleporters = + (xcm_builder::Case, xcm_builder::Case); match_types! { pub type OnlyParachains: impl Contains = { diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index b79b2cf444be..b07703060a7f 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rococo-runtime" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" build = "build.rs" diff --git a/runtime/rococo/constants/Cargo.toml b/runtime/rococo/constants/Cargo.toml index ec520965684b..772d66766d2d 100644 --- a/runtime/rococo/constants/Cargo.toml +++ b/runtime/rococo/constants/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rococo-runtime-constants" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index b230832b7454..8f3599686672 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -108,7 +108,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 9310, + spec_version: 9330, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -1224,7 +1224,7 @@ impl pallet_beefy::Config for Runtime { type MmrHash = ::Output; impl pallet_mmr::Config for Runtime { - const INDEXING_PREFIX: &'static [u8] = b"mmr"; + const INDEXING_PREFIX: &'static [u8] = mmr::INDEXING_PREFIX; type Hashing = Keccak256; type Hash = MmrHash; type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; @@ -1456,15 +1456,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - ( - // "Bound uses of call" - pallet_preimage::migration::v1::Migration, - pallet_scheduler::migration::v3::MigrateToV4, - pallet_democracy::migrations::v1::Migration, - pallet_multisig::migrations::v1::MigrateToV1, - // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, - ), + (), >; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; @@ -1723,6 +1715,10 @@ sp_api::impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } + fn mmr_leaf_count() -> Result { + Ok(Mmr::mmr_leaves()) + } + fn generate_proof( block_numbers: Vec, best_known_block_number: Option, diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index e05ee85c6250..ae0ec62c8493 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-test-runtime" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" build = "build.rs" diff --git a/runtime/test-runtime/constants/Cargo.toml b/runtime/test-runtime/constants/Cargo.toml index 4f6f144c2b61..c1d7b89408e7 100644 --- a/runtime/test-runtime/constants/Cargo.toml +++ b/runtime/test-runtime/constants/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-runtime-constants" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 0210f1ecd8b7..e4abb2392727 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -930,6 +930,10 @@ sp_api::impl_runtime_apis! { Err(mmr::Error::PalletNotIncluded) } + fn mmr_leaf_count() -> Result { + Err(mmr::Error::PalletNotIncluded) + } + fn generate_proof( _block_numbers: Vec, _best_known_block_number: Option, diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index d5efbb28d5a4..fa3456f762d8 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "westend-runtime" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" build = "build.rs" diff --git a/runtime/westend/constants/Cargo.toml b/runtime/westend/constants/Cargo.toml index d891ea0aa3a3..1c3703678f61 100644 --- a/runtime/westend/constants/Cargo.toml +++ b/runtime/westend/constants/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "westend-runtime-constants" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index a7930ff06ea6..2fee85a336a2 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -109,13 +109,13 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 9310, + spec_version: 9330, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, #[cfg(feature = "disable-runtime-api")] apis: sp_version::create_apis_vec![[]], - transaction_version: 14, + transaction_version: 15, state_version: 0, }; @@ -1222,16 +1222,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - ( - // "Bound uses of call" - pallet_preimage::migration::v1::Migration, - pallet_scheduler::migration::v3::MigrateToV4, - pallet_multisig::migrations::v1::MigrateToV1, - // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, - pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, - pallet_fast_unstake::migrations::v1::MigrateToV1, - ), + (), >; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; @@ -1459,7 +1450,10 @@ sp_api::impl_runtime_apis! 
{ impl mmr::MmrApi for Runtime { fn mmr_root() -> Result { + Err(mmr::Error::PalletNotIncluded) + } + fn mmr_leaf_count() -> Result { Err(mmr::Error::PalletNotIncluded) } diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index 0a01ea9d8ccd..1553456a9e7b 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -22,7 +22,6 @@ build-linux-stable: RUN_UI_TESTS: 1 script: - time cargo build --profile testnet --features pyroscope --verbose --bin polkadot - - sccache -s # pack artifacts - mkdir -p ./artifacts - VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name @@ -98,7 +97,6 @@ build-malus: - .collect-artifacts script: - time cargo build --profile testnet --verbose -p polkadot-test-malus - - sccache -s # pack artifacts - mkdir -p ./artifacts - mv ./target/testnet/malus ./artifacts/. @@ -165,18 +163,18 @@ build-implementers-guide: - job: test-deterministic-wasm artifacts: false extends: - - .docker-env + - .kubernetes-env - .test-refs - .collect-artifacts-short # git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284 variables: + GIT_STRATEGY: clone GIT_DEPTH: 0 CI_IMAGE: paritytech/mdbook-utils:e14aae4a-20221123 script: - mdbook build ./roadmap/implementers-guide - mkdir -p artifacts - mv roadmap/implementers-guide/book artifacts/ - - ls -la artifacts/ build-short-benchmark: stage: build diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index 9d638ad7b84a..3484fcae336e 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -7,6 +7,7 @@ publish-polkadot-debug-image: stage: publish extends: + - .kubernetes-env - .build-push-image rules: # Don't run when triggered from another pipeline @@ -18,6 +19,7 @@ publish-polkadot-debug-image: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 variables: + CI_IMAGE: ${BUILDAH_IMAGE} GIT_STRATEGY: none DOCKER_USER: ${PARITYPR_USER} DOCKER_PASS: ${PARITYPR_PASS} @@ -42,9 +44,11 @@ publish-test-collators-image: # service image for Simnet stage: publish extends: + - .kubernetes-env - .build-push-image - .zombienet-refs variables: + CI_IMAGE: ${BUILDAH_IMAGE} GIT_STRATEGY: none DOCKER_USER: ${PARITYPR_USER} DOCKER_PASS: ${PARITYPR_PASS} @@ -68,9 +72,11 @@ publish-malus-image: # service image for Simnet stage: publish extends: + - .kubernetes-env - .build-push-image - .zombienet-refs variables: + CI_IMAGE: ${BUILDAH_IMAGE} GIT_STRATEGY: none DOCKER_USER: ${PARITYPR_USER} DOCKER_PASS: ${PARITYPR_PASS} @@ -93,9 +99,11 @@ publish-malus-image: publish-staking-miner-image: stage: publish extends: + - .kubernetes-env - .build-push-image - .publish-refs variables: + CI_IMAGE: ${BUILDAH_IMAGE} # scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile DOCKERFILE: ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile IMAGE_NAME: docker.io/paritytech/staking-miner @@ -114,8 +122,8 @@ publish-s3-release: needs: - job: build-linux-stable artifacts: true - image: paritytech/awscli:latest variables: + CI_IMAGE: paritytech/awscli:latest GIT_STRATEGY: none PREFIX: "builds/polkadot/${ARCH}-${DOCKER_OS}" rules: @@ -152,7 +160,8 @@ publish-rustdoc: stage: publish extends: - .kubernetes-env - image: paritytech/tools:latest + variables: + CI_IMAGE: paritytech/tools:latest rules: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 9a3dd0270fbb..3a21a77d90f2 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -72,3 +72,18 @@ test-deterministic-wasm: - .compiler-info script: - ./scripts/ci/gitlab/test_deterministic_wasm.sh + +cargo-clippy: + stage: test + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + # the job can be found in check.yml + needs: + - job: job-starter + artifacts: false + variables: + RUSTY_CACHIER_TOOLCHAIN: nightly + extends: + - .docker-env + - .test-refs + script: + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy --all-targets diff --git a/statement-table/Cargo.toml b/statement-table/Cargo.toml index 81b4be51c8e4..a7d9eba3a440 100644 --- a/statement-table/Cargo.toml +++ b/statement-table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-statement-table" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/tests/benchmark_block.rs b/tests/benchmark_block.rs index ee68d43b2aa5..dc3b174599a9 100644 --- a/tests/benchmark_block.rs +++ b/tests/benchmark_block.rs @@ -32,7 +32,7 @@ use tempfile::tempdir; pub mod common; -static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; /// `benchmark block` works for all dev runtimes using the wasm executor. 
#[tokio::test] @@ -54,7 +54,7 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(["--chain", &runtime, "--force-authoring", "--alice"]) + .args(["--chain", runtime, "--force-authoring", "--alice"]) .arg("-d") .arg(base_path) .arg("--no-hardware-benchmarks") @@ -77,7 +77,7 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> { fn benchmark_block(runtime: &str, base_path: &Path, block: u32) -> Result<(), String> { // Invoke `benchmark block` with all options to make sure that they are valid. let status = Command::new(cargo_bin("polkadot")) - .args(["benchmark", "block", "--chain", &runtime]) + .args(["benchmark", "block", "--chain", runtime]) .arg("-d") .arg(base_path) .args(["--from", &block.to_string(), "--to", &block.to_string()]) diff --git a/tests/benchmark_extrinsic.rs b/tests/benchmark_extrinsic.rs index c112a8c023f8..79a7d4c45322 100644 --- a/tests/benchmark_extrinsic.rs +++ b/tests/benchmark_extrinsic.rs @@ -17,10 +17,9 @@ use assert_cmd::cargo::cargo_bin; use std::{process::Command, result::Result}; -static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; -static EXTRINSICS: [(&'static str, &'static str); 2] = - [("system", "remark"), ("balances", "transfer_keep_alive")]; +static EXTRINSICS: [(&str, &str); 2] = [("system", "remark"), ("balances", "transfer_keep_alive")]; /// `benchmark extrinsic` works for all dev runtimes and some extrinsics. #[test] @@ -43,8 +42,8 @@ fn benchmark_extrinsic_rejects_non_dev_runtimes() { fn benchmark_extrinsic(runtime: &str, pallet: &str, extrinsic: &str) -> Result<(), String> { let status = Command::new(cargo_bin("polkadot")) - .args(["benchmark", "extrinsic", "--chain", &runtime]) - .args(&["--pallet", pallet, "--extrinsic", extrinsic]) + .args(["benchmark", "extrinsic", "--chain", runtime]) + .args(["--pallet", pallet, "--extrinsic", extrinsic]) // Run with low repeats for faster execution. .args(["--repeat=1", "--warmup=1", "--max-ext-per-block=1"]) .status() diff --git a/tests/benchmark_overhead.rs b/tests/benchmark_overhead.rs index a3b4ed1160ea..10582870168e 100644 --- a/tests/benchmark_overhead.rs +++ b/tests/benchmark_overhead.rs @@ -18,7 +18,7 @@ use assert_cmd::cargo::cargo_bin; use std::{process::Command, result::Result}; use tempfile::tempdir; -static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; /// `benchmark overhead` works for all dev runtimes. #[test] diff --git a/tests/benchmark_storage_works.rs b/tests/benchmark_storage_works.rs index f5e2851f250f..8d9694aa0a0e 100644 --- a/tests/benchmark_storage_works.rs +++ b/tests/benchmark_storage_works.rs @@ -38,7 +38,7 @@ fn benchmark_storage_works() { /// Invoke the `benchmark storage` sub-command. 
fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus { Command::new(cargo_bin("polkadot")) - .args(&["benchmark", "storage", "--dev"]) + .args(["benchmark", "storage", "--dev"]) .arg("--db") .arg(db) .arg("--weight-path") diff --git a/tests/common.rs b/tests/common.rs index 3f040208972c..6a41975f87fa 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -91,11 +91,13 @@ pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { // does the line contain our port (we expect this specific output from substrate). let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") { None => return None, - Some((_, after)) => after.split_once(",").unwrap().0, + Some((_, after)) => after.split_once(',').unwrap().0, }; Some(format!("ws://{}", sock_addr)) }) - .expect(&format!("Could not find WebSocket address in process output:\n{}", &data)); + .unwrap_or_else(|| { + panic!("Could not find WebSocket address in process output:\n{}", &data) + }); (ws_url, data) } diff --git a/tests/invalid_order_arguments.rs b/tests/invalid_order_arguments.rs index f205e935ab95..f8dc32a82a26 100644 --- a/tests/invalid_order_arguments.rs +++ b/tests/invalid_order_arguments.rs @@ -24,7 +24,7 @@ fn invalid_order_arguments() { let tmpdir = tempdir().expect("could not create temp dir"); let status = Command::new(cargo_bin("polkadot")) - .args(&["--dev", "invalid_order_arguments", "-d"]) + .args(["--dev", "invalid_order_arguments", "-d"]) .arg(tmpdir.path()) .arg("-y") .status() diff --git a/tests/purge_chain_works.rs b/tests/purge_chain_works.rs index c69d8cc4a81a..ab3ee506b60a 100644 --- a/tests/purge_chain_works.rs +++ b/tests/purge_chain_works.rs @@ -36,7 +36,7 @@ async fn purge_chain_rocksdb_works() { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) + .args(["--dev", "-d"]) .arg(tmpdir.path()) .arg("--port") .arg("33034") @@ -61,7 +61,7 @@ async fn purge_chain_rocksdb_works() { // Purge chain let status = Command::new(cargo_bin("polkadot")) - .args(&["purge-chain", "--dev", "-d"]) + .args(["purge-chain", "--dev", "-d"]) .arg(tmpdir.path()) .arg("-y") .status() @@ -86,7 +86,7 @@ async fn purge_chain_paritydb_works() { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) + .args(["--dev", "-d"]) .arg(tmpdir.path()) .arg("--database") .arg("paritydb-experimental") @@ -111,7 +111,7 @@ async fn purge_chain_paritydb_works() { // Purge chain let status = Command::new(cargo_bin("polkadot")) - .args(&["purge-chain", "--dev", "-d"]) + .args(["purge-chain", "--dev", "-d"]) .arg(tmpdir.path()) .arg("--database") .arg("paritydb-experimental") diff --git a/tests/running_the_node_and_interrupt.rs b/tests/running_the_node_and_interrupt.rs index 895db534bc5c..5b0e6ec8b013 100644 --- a/tests/running_the_node_and_interrupt.rs +++ b/tests/running_the_node_and_interrupt.rs @@ -40,7 +40,7 @@ async fn running_the_node_works_and_can_be_interrupted() { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) + .args(["--dev", "-d"]) .arg(tmpdir.path()) .arg("--no-hardware-benchmarks") .spawn() diff --git a/utils/generate-bags/Cargo.toml b/utils/generate-bags/Cargo.toml index 8f2d8e79e1e5..fc6b9b69f9b4 100644 --- a/utils/generate-bags/Cargo.toml +++ b/utils/generate-bags/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-voter-bags" -version = 
"0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/utils/remote-ext-tests/bags-list/Cargo.toml b/utils/remote-ext-tests/bags-list/Cargo.toml index 20dc36fd5385..98ff17f777f8 100644 --- a/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/utils/remote-ext-tests/bags-list/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "remote-ext-tests-bags-list" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/utils/staking-miner/Cargo.toml b/utils/staking-miner/Cargo.toml index 45cb5fec6d1a..28904d225d34 100644 --- a/utils/staking-miner/Cargo.toml +++ b/utils/staking-miner/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "staking-miner" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] edition = "2021" diff --git a/xcm/Cargo.toml b/xcm/Cargo.toml index 5a2e6813b0b4..7ac855b9c63d 100644 --- a/xcm/Cargo.toml +++ b/xcm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "The basic XCM datastructures." edition = "2021" diff --git a/xcm/pallet-xcm-benchmarks/Cargo.toml b/xcm/pallet-xcm-benchmarks/Cargo.toml index a4fd91004e66..483a22f72b37 100644 --- a/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-xcm-benchmarks" authors = ["Parity Technologies "] edition = "2021" -version = "0.9.31" +version = "0.9.33" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/xcm/pallet-xcm/Cargo.toml b/xcm/pallet-xcm/Cargo.toml index a5b6ad7de395..8545490a9d8e 100644 --- a/xcm/pallet-xcm/Cargo.toml +++ b/xcm/pallet-xcm/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Parity Technologies "] edition = "2021" name = "pallet-xcm" -version = "0.9.31" +version = "0.9.33" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/xcm/procedural/Cargo.toml b/xcm/procedural/Cargo.toml index 14a7d068db68..3e10f246e53d 100644 --- a/xcm/procedural/Cargo.toml +++ b/xcm/procedural/Cargo.toml @@ -1,7 +1,7 @@ [package] authors = ["Parity Technologies "] name = "xcm-procedural" -version = "0.9.31" +version = "0.9.33" edition = "2021" [lib] diff --git a/xcm/xcm-builder/Cargo.toml b/xcm/xcm-builder/Cargo.toml index 3b3f97fd823c..7fb2dfc2bd40 100644 --- a/xcm/xcm-builder/Cargo.toml +++ b/xcm/xcm-builder/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Parity Technologies "] edition = "2021" name = "xcm-builder" description = "Tools & types for building with XCM and its executor." -version = "0.9.31" +version = "0.9.33" [dependencies] parity-scale-codec = { version = "3.1.5", default-features = false, features = ["derive"] } diff --git a/xcm/xcm-executor/Cargo.toml b/xcm/xcm-executor/Cargo.toml index 1f9842887eae..6ba0e89d9fb9 100644 --- a/xcm/xcm-executor/Cargo.toml +++ b/xcm/xcm-executor/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Parity Technologies "] edition = "2021" name = "xcm-executor" description = "An abstract and configurable XCM message executor." 
-version = "0.9.31" +version = "0.9.33" [dependencies] impl-trait-for-tuples = "0.2.2" diff --git a/xcm/xcm-executor/integration-tests/Cargo.toml b/xcm/xcm-executor/integration-tests/Cargo.toml index 9e5adab26286..4e3ec402bd07 100644 --- a/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/xcm/xcm-executor/integration-tests/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Parity Technologies "] edition = "2021" name = "xcm-executor-integration-tests" description = "Integration tests for the XCM Executor" -version = "0.9.31" +version = "0.9.33" [dependencies] frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/xcm/xcm-executor/src/assets.rs b/xcm/xcm-executor/src/assets.rs index 324e92dce9ff..6ecbf0e0cf44 100644 --- a/xcm/xcm-executor/src/assets.rs +++ b/xcm/xcm-executor/src/assets.rs @@ -100,14 +100,14 @@ impl Assets { } /// A borrowing iterator over the fungible assets. - pub fn fungible_assets_iter<'a>(&'a self) -> impl Iterator + 'a { + pub fn fungible_assets_iter(&self) -> impl Iterator + '_ { self.fungible .iter() .map(|(id, &amount)| MultiAsset { fun: Fungible(amount), id: id.clone() }) } /// A borrowing iterator over the non-fungible assets. - pub fn non_fungible_assets_iter<'a>(&'a self) -> impl Iterator + 'a { + pub fn non_fungible_assets_iter(&self) -> impl Iterator + '_ { self.non_fungible .iter() .map(|(id, instance)| MultiAsset { fun: NonFungible(instance.clone()), id: id.clone() }) @@ -126,7 +126,7 @@ impl Assets { } /// A borrowing iterator over all assets. - pub fn assets_iter<'a>(&'a self) -> impl Iterator + 'a { + pub fn assets_iter(&self) -> impl Iterator + '_ { self.fungible_assets_iter().chain(self.non_fungible_assets_iter()) } diff --git a/xcm/xcm-simulator/Cargo.toml b/xcm/xcm-simulator/Cargo.toml index 4c3c45c404b7..5e2bda46e494 100644 --- a/xcm/xcm-simulator/Cargo.toml +++ b/xcm/xcm-simulator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm-simulator" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Test kit to simulate cross-chain message passing and XCM execution" edition = "2021" diff --git a/xcm/xcm-simulator/example/Cargo.toml b/xcm/xcm-simulator/example/Cargo.toml index 7fa6ade69fc5..ce95b05450e1 100644 --- a/xcm/xcm-simulator/example/Cargo.toml +++ b/xcm/xcm-simulator/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm-simulator-example" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Examples of xcm-simulator usage." edition = "2021" diff --git a/xcm/xcm-simulator/fuzzer/Cargo.toml b/xcm/xcm-simulator/fuzzer/Cargo.toml index 7ecff2f8c021..85f99e6bd884 100644 --- a/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xcm-simulator-fuzzer" -version = "0.9.31" +version = "0.9.33" authors = ["Parity Technologies "] description = "Examples of xcm-simulator usage." edition = "2021"
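The `xcm-executor` `Assets` changes near the end of this diff replace explicitly named iterator lifetimes (`fn assets_iter<'a>(&'a self) -> impl Iterator + 'a`) with the anonymous `'_` lifetime. A minimal sketch of the same rewrite on a hypothetical container, not the real `Assets` type:

```rust
// Sketch only: a hypothetical container illustrating the lifetime elision.
struct Bag {
    items: Vec<String>,
}

impl Bag {
    // Before: fn iter<'a>(&'a self) -> impl Iterator<Item = &'a String> + 'a
    // After: the named lifetime is unnecessary; `'_` ties the iterator to `&self`.
    fn iter(&self) -> impl Iterator<Item = &String> + '_ {
        self.items.iter()
    }
}

fn main() {
    let bag = Bag { items: vec!["fungible".into(), "non-fungible".into()] };
    assert_eq!(bag.iter().count(), 2);
}
```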