diff --git a/Cargo.lock b/Cargo.lock index 419654736dd3..7262d55d8865 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -523,7 +523,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "hash-db", "log", @@ -691,9 +691,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bounded-collections" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a071c348a5ef6da1d3a87166b408170b46002382b1dda83992b5c2208cefb370" +checksum = "07fbd1d11282a1eb134d3c3b7cf8ce213b5161c6e5f73fb1b98618482c606b64" dependencies = [ "log", "parity-scale-codec 3.4.0", @@ -1126,6 +1126,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "common-path" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" + [[package]] name = "concurrent-queue" version = "1.2.2" @@ -1582,7 +1588,7 @@ dependencies = [ "cfg-if", "fiat-crypto", "packed_simd_2", - "platforms 3.0.2", + "platforms", "subtle", "zeroize", ] @@ -1931,16 +1937,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] -name = "downcast" -version = "0.11.0" +name = "docify" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +checksum = "18b972b74c30cbe838fc6a07665132ff94f257350e26fd01d80bc59ee7fcf129" +dependencies = [ + "docify_macros", +] [[package]] -name = "downcast-rs" -version = "1.2.0" +name = "docify_macros" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +checksum = "c93004d1011191c56df9e853dca42f2012e7488638bcd5078935f5ce43e06cf3" +dependencies = [ + "common-path", + "derive-syn-parse", + "lazy_static", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "syn 2.0.16", + "termcolor", + "walkdir", +] + +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "dtoa" @@ -2516,7 +2543,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", ] @@ -2539,7 +2566,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-support-procedural", @@ -2564,7 +2591,7 @@ dependencies = [ [[package]] name = 
"frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "Inflector", "array-bytes", @@ -2611,7 +2638,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2622,7 +2649,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2639,7 +2666,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -2668,7 +2695,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-recursion", "futures", @@ -2689,7 +2716,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "bitflags", "environmental", @@ -2698,6 +2725,7 @@ dependencies = [ "impl-trait-for-tuples", "k256", "log", + "macro_magic", "once_cell", "parity-scale-codec 3.4.0", "paste", @@ -2723,13 +2751,14 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "Inflector", "cfg-expr", "derive-syn-parse", "frame-support-procedural-tools", "itertools", + "macro_magic", "proc-macro-warning", "proc-macro2", "quote", @@ -2739,7 +2768,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2751,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro2", "quote", @@ -2761,7 +2790,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2781,24 +2810,26 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-version", + "static_assertions", "trybuild", ] [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", "parity-scale-codec 3.4.0", "scale-info", + "serde", ] [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "cfg-if", "frame-support", @@ -2817,7 +2848,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -2832,7 +2863,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "sp-api", @@ -2841,7 +2872,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "parity-scale-codec 3.4.0", @@ -3029,7 +3060,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "chrono", "frame-election-provider-support", @@ -4117,12 +4148,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" -[[package]] -name = "libm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" - [[package]] name = "libp2p" version = "0.51.3" @@ -4695,15 +4720,6 @@ dependencies = [ "value-bag", ] -[[package]] -name = "lru" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" -dependencies = [ - "hashbrown 0.12.3", -] - [[package]] name = "lru" version = "0.9.0" @@ -4760,6 +4776,53 @@ dependencies = [ "libc", ] +[[package]] +name = "macro_magic" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e7c1b5ffe892e88b288611ccf55f9c4f4e43214aea6f7f80f0c2c53c85e68e" +dependencies = [ + "macro_magic_core", + "macro_magic_macros", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "macro_magic_core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e812c59de90e5d50405131c676dad7d239de39ccc975620c72d467c70138851" +dependencies = [ + "derive-syn-parse", + "macro_magic_core_macros", + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "macro_magic_core_macros" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b1906fa06ee8c02b24595e121be94e0036cb64f9dce5e587edd1e823c87c94" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "macro_magic_macros" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49e8939ee52e99672a887d8ee13776d0f54262c058ce7e911185fed8e43e3a59" +dependencies = [ + "macro_magic_core", + "quote", + "syn 2.0.16", +] + [[package]] name = "maplit" version = "1.0.2" @@ -4874,12 +4937,6 @@ dependencies = [ "hash-db", ] -[[package]] -name = "memory_units" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" - [[package]] name = "merlin" version = "2.0.1" @@ -4949,7 +5006,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "log", @@ -4968,7 +5025,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "anyhow", "jsonrpsee", @@ -5545,13 +5602,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" dependencies = [ "cfg-if", - "libm 0.1.4", + "libm", ] [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5566,7 +5623,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -5582,7 +5639,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -5596,7 +5653,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5620,7 +5677,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5640,7 +5697,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5659,7 +5716,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5674,7 +5731,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -5693,7 +5750,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5717,7 +5774,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5735,7 +5792,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5754,7 +5811,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5771,7 +5828,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5788,7 +5845,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5806,7 +5863,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5829,7 +5886,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5842,7 +5899,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5860,8 +5917,9 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -5878,7 +5936,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5901,7 +5959,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5917,7 +5975,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5937,7 +5995,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5954,7 +6012,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5971,7 +6029,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5990,7 +6048,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6007,7 +6065,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6023,7 +6081,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6039,7 +6097,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -6056,7 +6114,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6076,7 +6134,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ 
"pallet-nomination-pools", "parity-scale-codec 3.4.0", @@ -6087,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -6104,7 +6162,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6128,7 +6186,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6145,7 +6203,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6160,7 +6218,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6178,7 +6236,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6193,7 +6251,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6212,7 +6270,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6229,7 +6287,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -6250,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6266,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -6280,7 +6338,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6303,7 +6361,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6314,7 +6372,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "log", "sp-arithmetic", @@ -6323,7 +6381,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "sp-api", @@ -6332,7 +6390,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6349,7 +6407,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6364,7 +6422,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6382,7 +6440,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6401,7 +6459,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-support", "frame-system", @@ -6417,7 +6475,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6433,7 +6491,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "pallet-transaction-payment", "parity-scale-codec 3.4.0", @@ -6445,7 +6503,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6462,7 +6520,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6477,7 +6535,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6493,7 +6551,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6508,7 +6566,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6872,12 +6930,6 @@ version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" -[[package]] -name = "platforms" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" - [[package]] name = "platforms" version = "3.0.2" @@ -6893,7 +6945,7 @@ dependencies = [ "nix 0.26.2", "polkadot-cli", "polkadot-core-primitives", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf-prepare-worker", "polkadot-overseer", "substrate-rpc-client", "tempfile", @@ -7020,7 +7072,8 @@ dependencies = [ "futures", "log", "polkadot-client", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf-execute-worker", + "polkadot-node-core-pvf-prepare-worker", "polkadot-node-metrics", "polkadot-performance-test", "polkadot-service", @@ -7525,6 +7578,9 @@ dependencies = [ "parity-scale-codec 3.4.0", "pin-project", "polkadot-core-primitives", + "polkadot-node-core-pvf-common", + "polkadot-node-core-pvf-execute-worker", + "polkadot-node-core-pvf-prepare-worker", "polkadot-node-metrics", "polkadot-node-primitives", "polkadot-parachain", @@ -7537,6 +7593,8 @@ dependencies = [ "sp-wasm-interface", "substrate-build-script-utils", "tempfile", + "test-parachain-adder", + "test-parachain-halt", "tokio", "tracing-gum", ] @@ -7565,15 +7623,32 @@ dependencies = [ ] [[package]] -name = "polkadot-node-core-pvf-worker" +name = "polkadot-node-core-pvf-common" version = "0.9.41" dependencies = [ - "assert_matches", "cpu-time", "futures", "libc", "parity-scale-codec 3.4.0", - "polkadot-node-core-pvf", + "polkadot-parachain", + "polkadot-primitives", + "sc-executor-common", + "sc-executor-wasmtime", + "sp-core", + "sp-tracing", + "substrate-build-script-utils", + "tokio", + "tracing-gum", +] + +[[package]] +name = "polkadot-node-core-pvf-execute-worker" +version = "0.9.41" +dependencies = [ + "cpu-time", + "futures", + "parity-scale-codec 3.4.0", + "polkadot-node-core-pvf-common", "polkadot-parachain", "polkadot-primitives", "rayon", @@ -7585,10 +7660,28 @@ dependencies = [ "sp-io", "sp-maybe-compressed-blob", "sp-tracing", - "substrate-build-script-utils", - "tempfile", - "test-parachain-adder", - "test-parachain-halt", + "tikv-jemalloc-ctl", + "tokio", + "tracing-gum", +] + +[[package]] +name = "polkadot-node-core-pvf-prepare-worker" +version = "0.9.41" +dependencies = [ + "futures", + "libc", + "parity-scale-codec 3.4.0", + "polkadot-node-core-pvf-common", + "polkadot-parachain", + "polkadot-primitives", + "rayon", + "sc-executor", + "sc-executor-common", + "sc-executor-wasmtime", + "sp-io", + "sp-maybe-compressed-blob", + "sp-tracing", "tikv-jemalloc-ctl", "tokio", "tracing-gum", @@ -7845,7 +7938,7 @@ dependencies = [ "kusama-runtime", "log", "polkadot-erasure-coding", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf-prepare-worker", "polkadot-node-primitives", "polkadot-primitives", "quote", @@ -8356,7 +8449,8 @@ dependencies = [ "polkadot-node-core-backing", "polkadot-node-core-candidate-validation", "polkadot-node-core-dispute-coordinator", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf-execute-worker", + "polkadot-node-core-pvf-prepare-worker", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -8696,13 +8790,13 @@ dependencies = [ [[package]] name = "proc-macro-warning" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e25495609acefcaeb5052edad8ac91017c9bc98fc38ef321ed524e50b68bac" +checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -8942,9 +9036,9 @@ 
dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -9653,7 +9747,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "log", "sp-core", @@ -9664,7 +9758,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures", @@ -9693,7 +9787,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "futures-timer", @@ -9716,7 +9810,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "sc-client-api", @@ -9731,7 +9825,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9750,7 +9844,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9761,7 +9855,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "chrono", @@ -9801,7 +9895,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "fnv", "futures", @@ -9828,7 +9922,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "hash-db", "kvdb", @@ -9854,7 +9948,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures", @@ -9879,7 +9973,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "fork-tree", @@ -9915,7 +10009,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "jsonrpsee", @@ -9937,7 +10031,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "async-channel", @@ -9973,7 +10067,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "jsonrpsee", @@ -9992,7 +10086,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "fork-tree", "parity-scale-codec 3.4.0", @@ -10005,7 +10099,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -10045,7 +10139,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "finality-grandpa", "futures", @@ -10065,7 +10159,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures", @@ -10088,9 +10182,9 @@ dependencies = [ [[package]] name = 
"sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ - "lru 0.8.1", + "lru 0.10.0", "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-executor-common", @@ -10110,7 +10204,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -10122,7 +10216,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "anyhow", "cfg-if", @@ -10140,7 +10234,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ansi_term", "futures", @@ -10156,7 +10250,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -10170,7 +10264,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "async-channel", @@ -10185,17 +10279,17 @@ dependencies = [ "libp2p", "linked_hash_set", "log", - "lru 0.8.1", + "lru 0.10.0", "mockall", "parity-scale-codec 3.4.0", "parking_lot 0.12.1", + "partial_sort", "pin-project", "rand 0.8.5", "sc-block-builder", "sc-client-api", "sc-consensus", "sc-network-common", - "sc-peerset", "sc-utils", "serde", "serde_json", @@ -10209,13 +10303,14 @@ dependencies = [ "substrate-prometheus-endpoint", "thiserror", "unsigned-varint", + "wasm-timer", "zeroize", ] [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-channel", "cid", @@ -10236,7 +10331,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "async-trait", @@ -10248,7 +10343,6 @@ dependencies = [ "parity-scale-codec 3.4.0", "prost-build", "sc-consensus", - "sc-peerset", "sc-utils", "serde", 
"smallvec", @@ -10264,17 +10358,16 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ahash 0.8.2", "futures", "futures-timer", "libp2p", "log", - "lru 0.8.1", + "lru 0.10.0", "sc-network", "sc-network-common", - "sc-peerset", "sp-runtime", "substrate-prometheus-endpoint", "tracing", @@ -10283,7 +10376,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "async-channel", @@ -10296,7 +10389,6 @@ dependencies = [ "sc-client-api", "sc-network", "sc-network-common", - "sc-peerset", "sp-blockchain", "sp-core", "sp-runtime", @@ -10306,7 +10398,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "async-channel", @@ -10316,7 +10408,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "lru 0.8.1", + "lru 0.10.0", "mockall", "parity-scale-codec 3.4.0", "prost 0.11.0", @@ -10325,7 +10417,6 @@ dependencies = [ "sc-consensus", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", "smallvec", "sp-arithmetic", @@ -10341,17 +10432,15 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "futures", "libp2p", "log", "parity-scale-codec 3.4.0", - "pin-project", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", "sp-consensus", "sp-runtime", @@ -10361,7 +10450,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "bytes", @@ -10379,7 +10468,6 @@ dependencies = [ "sc-client-api", "sc-network", "sc-network-common", - "sc-peerset", "sc-utils", "sp-api", "sp-core", @@ -10389,26 +10477,10 @@ dependencies = [ "tracing", ] -[[package]] -name = "sc-peerset" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" -dependencies = [ - "futures", - "libp2p-identity", - "log", - "parking_lot 0.12.1", - "partial_sort", - "sc-utils", - "serde_json", - "sp-arithmetic", - "wasm-timer", -] - [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10417,7 +10489,7 
@@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "jsonrpsee", @@ -10448,7 +10520,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "jsonrpsee", "parity-scale-codec 3.4.0", @@ -10467,7 +10539,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "http", "jsonrpsee", @@ -10482,7 +10554,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "futures", @@ -10508,7 +10580,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "directories", @@ -10574,7 +10646,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "log", "parity-scale-codec 3.4.0", @@ -10585,7 +10657,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "clap 4.2.5", "fs4", @@ -10601,7 +10673,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "jsonrpsee", "parity-scale-codec 3.4.0", @@ -10620,7 +10692,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "libc", @@ -10639,7 +10711,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "chrono", "futures", @@ -10658,7 +10730,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ansi_term", "atty", @@ -10689,7 +10761,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10700,7 +10772,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures", @@ -10727,13 +10799,15 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures", "log", + "parity-scale-codec 3.4.0", "serde", "sp-blockchain", + "sp-core", "sp-runtime", "thiserror", ] @@ -10741,7 +10815,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-channel", "futures", @@ -10972,18 +11046,18 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.158" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771d4d9c4163ee138805e12c710dd365e4f44be8be0503cb1bb9eb989425d9c9" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.158" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e801c1712f48475582b7696ac71e0ca34ebb30e09338425384269d9717c62cad" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", @@ -11289,7 +11363,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "hash-db", "log", @@ -11309,7 +11383,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ 
"Inflector", "blake2", @@ -11322,8 +11396,8 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "23.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -11335,8 +11409,8 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "16.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "integer-sqrt", "num-traits", @@ -11350,7 +11424,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -11363,7 +11437,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "sp-api", @@ -11375,11 +11449,11 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "log", - "lru 0.8.1", + "lru 0.10.0", "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sp-api", @@ -11393,7 +11467,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures", @@ -11408,7 +11482,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "parity-scale-codec 3.4.0", @@ -11426,7 +11500,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "parity-scale-codec 3.4.0", @@ -11447,7 +11521,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "lazy_static", "parity-scale-codec 3.4.0", @@ -11466,7 
+11540,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "finality-grandpa", "log", @@ -11484,7 +11558,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -11495,8 +11569,8 @@ dependencies = [ [[package]] name = "sp-core" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "21.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "bitflags", @@ -11539,8 +11613,8 @@ dependencies = [ [[package]] name = "sp-core-hashing" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "9.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "blake2b_simd", "byteorder", @@ -11553,8 +11627,8 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "9.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro2", "quote", @@ -11565,7 +11639,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11573,8 +11647,8 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "8.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro2", "quote", @@ -11583,8 +11657,8 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "0.19.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "environmental", "parity-scale-codec 3.4.0", @@ -11595,7 +11669,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11609,8 +11683,8 @@ dependencies = [ [[package]] name = "sp-io" -version = "7.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "23.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "bytes", "ed25519", @@ -11635,8 +11709,8 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "24.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "lazy_static", "sp-core", @@ -11646,8 +11720,8 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "0.27.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "parity-scale-codec 3.4.0", @@ -11661,7 +11735,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11670,7 +11744,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-metadata", "parity-scale-codec 3.4.0", @@ -11681,7 +11755,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11699,7 +11773,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -11713,7 +11787,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "sp-api", "sp-core", @@ -11722,8 +11796,8 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "8.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "backtrace", "lazy_static", @@ -11733,7 +11807,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = 
"git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "rustc-hash", "serde", @@ -11742,8 +11816,8 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "24.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "either", "hash256-std-hasher", @@ -11764,8 +11838,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "17.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11782,8 +11856,8 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "11.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "Inflector", "proc-macro-crate", @@ -11795,7 +11869,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -11809,7 +11883,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -11821,8 +11895,8 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.13.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "0.28.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "hash-db", "log", @@ -11842,7 +11916,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "log", "parity-scale-codec 3.4.0", @@ -11859,13 +11933,13 @@ dependencies = [ [[package]] name = "sp-std" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "8.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" [[package]] name = "sp-storage" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "13.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "impl-serde", "parity-scale-codec 3.4.0", @@ -11878,7 +11952,7 @@ dependencies = [ 
[[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "futures-timer", @@ -11892,8 +11966,8 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "10.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "sp-std", @@ -11905,7 +11979,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "sp-api", "sp-runtime", @@ -11914,7 +11988,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "log", @@ -11929,8 +12003,8 @@ dependencies = [ [[package]] name = "sp-trie" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "22.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11952,8 +12026,8 @@ dependencies = [ [[package]] name = "sp-version" -version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "22.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "impl-serde", "parity-scale-codec 3.4.0", @@ -11969,8 +12043,8 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "8.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "proc-macro2", @@ -11980,22 +12054,21 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "7.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "14.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "anyhow", "impl-trait-for-tuples", "log", "parity-scale-codec 3.4.0", "sp-std", - "wasmi", "wasmtime", ] [[package]] name = "sp-weights" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +version = "20.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "parity-scale-codec 3.4.0", "scale-info", @@ -12236,15 +12309,15 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = 
"3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ - "platforms 2.0.0", + "platforms", ] [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -12263,7 +12336,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "hyper", "log", @@ -12275,7 +12348,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "jsonrpsee", @@ -12288,7 +12361,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "jsonrpsee", "log", @@ -12307,7 +12380,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "array-bytes", "async-trait", @@ -12333,7 +12406,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12343,7 +12416,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12354,12 +12427,13 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "ansi_term", "build-helper", "cargo_metadata", "filetime", + "parity-wasm", "sp-maybe-compressed-blob", "strum", "tempfile", @@ -12534,7 +12608,7 @@ dependencies = [ "log", "parity-scale-codec 3.4.0", "polkadot-cli", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf", 
"polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-parachain", @@ -12582,7 +12656,7 @@ dependencies = [ "log", "parity-scale-codec 3.4.0", "polkadot-cli", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-parachain", @@ -13198,7 +13272,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#79d37ef461a4bdcf6ede7b37d64b28b58484dc7c" +source = "git+https://github.com/paritytech/substrate?branch=master#6ef184e33f6ce0f56999ae84b212ea6148c0624d" dependencies = [ "async-trait", "clap 4.2.5", @@ -13654,39 +13728,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "wasmi" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" -dependencies = [ - "parity-wasm", - "wasmi-validation", - "wasmi_core", -] - -[[package]] -name = "wasmi-validation" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "wasmi_core" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" -dependencies = [ - "downcast-rs", - "libm 0.2.1", - "memory_units", - "num-rational", - "num-traits", -] - [[package]] name = "wasmparser" version = "0.102.0" diff --git a/Cargo.toml b/Cargo.toml index 330fb45dd39c..9e9ad908292f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ tikv-jemallocator = "0.5.0" # Crates in our workspace, defined as dependencies so we can pass them feature flags. 
polkadot-cli = { path = "cli", features = [ "kusama-native", "westend-native", "rococo-native" ] } -polkadot-node-core-pvf-worker = { path = "node/core/pvf/worker" } +polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } polkadot-overseer = { path = "node/overseer" } [dev-dependencies] @@ -82,7 +82,9 @@ members = [ "node/core/prospective-parachains", "node/core/provisioner", "node/core/pvf", - "node/core/pvf/worker", + "node/core/pvf/common", + "node/core/pvf/execute-worker", + "node/core/pvf/prepare-worker", "node/core/pvf-checker", "node/core/runtime-api", "node/network/approval-distribution", @@ -209,7 +211,7 @@ try-runtime = [ "polkadot-cli/try-runtime" ] fast-runtime = [ "polkadot-cli/fast-runtime" ] runtime-metrics = [ "polkadot-cli/runtime-metrics" ] pyroscope = ["polkadot-cli/pyroscope"] -jemalloc-allocator = ["polkadot-node-core-pvf-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] +jemalloc-allocator = ["polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] network-protocol-staging = ["polkadot-cli/network-protocol-staging"] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 86a508426af9..aa1ee52a6fa9 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -23,7 +23,8 @@ pyroscope_pprofrs = { version = "0.2", optional = true } service = { package = "polkadot-service", path = "../node/service", default-features = false, optional = true } polkadot-client = { path = "../node/client", optional = true } -polkadot-node-core-pvf-worker = { path = "../node/core/pvf/worker", optional = true } +polkadot-node-core-pvf-execute-worker = { path = "../node/core/pvf/execute-worker", optional = true } +polkadot-node-core-pvf-prepare-worker = { path = "../node/core/pvf/prepare-worker", optional = true } polkadot-performance-test = { path = "../node/test/performance-test", optional = true } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -54,7 +55,8 @@ cli = [ "frame-benchmarking-cli", "try-runtime-cli", "polkadot-client", - "polkadot-node-core-pvf-worker", + "polkadot-node-core-pvf-execute-worker", + "polkadot-node-core-pvf-prepare-worker", ] runtime-benchmarks = [ "service/runtime-benchmarks", diff --git a/cli/src/cli.rs b/cli/src/cli.rs index b775bb6b77ad..69c54b428a92 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -114,9 +114,10 @@ pub struct RunCmd { #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, - /// Enable the BEEFY gadget (only on Rococo or Wococo for now). + /// Disable the BEEFY gadget + /// (currently enabled by default on Rococo, Wococo and Versi). #[arg(long)] - pub beefy: bool, + pub no_beefy: bool, /// Add the destination address to the jaeger agent. /// diff --git a/cli/src/command.rs b/cli/src/command.rs index 802ba93941c3..378e8e7650c2 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -298,12 +298,9 @@ where .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; - // Disallow BEEFY on production networks. - if cli.run.beefy && - (chain_spec.is_polkadot() || chain_spec.is_kusama() || chain_spec.is_westend()) - { - return Err(Error::Other("BEEFY disallowed on production networks".to_string())) - } + // By default, enable BEEFY on test networks. 
+ let enable_beefy = (chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) && + !cli.run.no_beefy; set_default_ss58_version(chain_spec); @@ -346,7 +343,7 @@ where config, service::IsCollator::No, grandpa_pause, - cli.run.beefy, + enable_beefy, jaeger_agent, None, false, @@ -495,7 +492,7 @@ pub fn run() -> Result<()> { #[cfg(not(target_os = "android"))] { - polkadot_node_core_pvf_worker::prepare_worker_entrypoint( + polkadot_node_core_pvf_prepare_worker::worker_entrypoint( &cmd.socket_path, Some(&cmd.node_impl_version), ); @@ -517,7 +514,7 @@ pub fn run() -> Result<()> { #[cfg(not(target_os = "android"))] { - polkadot_node_core_pvf_worker::execute_worker_entrypoint( + polkadot_node_core_pvf_execute_worker::worker_entrypoint( &cmd.socket_path, Some(&cmd.node_impl_version), ); diff --git a/node/core/approval-voting/src/approval_db/v1/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs index d2a13ad54550..c31389269d2e 100644 --- a/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -15,6 +15,12 @@ // along with Polkadot. If not, see . //! Version 1 of the DB schema. +//! +//! Note that the version here differs from the actual version of the parachains +//! database (check `CURRENT_VERSION` in `node/service/src/parachains_db/upgrade.rs`). +//! The code in this module implements the way approval voting works with +//! its data in the database. Any breaking changes here will still +//! require a db migration (check `node/service/src/parachains_db/upgrade.rs`). use parity_scale_codec::{Decode, Encode}; use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche}; @@ -154,8 +160,6 @@ pub type Bitfield = BitVec; pub struct Config { /// The column family in the database where data is stored. pub col_approval_data: u32, - /// The column of the database where rolling session window data is stored. - pub col_session_data: u32, } /// Details pertaining to our assignment on a block. 
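For orientation, a minimal sketch of the slimmed-down approval-voting configuration after the `col_session_data` removal (the struct matches the `lib.rs` change further down in this diff; the column index and slot duration below are illustrative, not the node's real database layout):

// Sketch only, assuming `polkadot-node-core-approval-voting` as a dependency; values are illustrative.
use polkadot_node_core_approval_voting::Config;

fn example_approval_voting_config() -> Config {
    Config {
        col_approval_data: 0,        // hypothetical column index
        slot_duration_millis: 6_000, // illustrative slot duration, in milliseconds
    }
}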
diff --git a/node/core/approval-voting/src/approval_db/v1/tests.rs b/node/core/approval-voting/src/approval_db/v1/tests.rs index 0d30cc8c0cdc..07d8242b772e 100644 --- a/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -28,12 +28,10 @@ use std::{collections::HashMap, sync::Arc}; use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; const DATA_COL: u32 = 0; -const SESSION_DATA_COL: u32 = 1; -const NUM_COLUMNS: u32 = 2; +const NUM_COLUMNS: u32 = 1; -const TEST_CONFIG: Config = - Config { col_approval_data: DATA_COL, col_session_data: SESSION_DATA_COL }; +const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL }; fn make_db() -> (DbBackend, Arc) { let db = kvdb_memorydb::create(NUM_COLUMNS); diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index 1ea2687a0246..e33caed49c5f 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -609,12 +609,10 @@ pub(crate) mod tests { use crate::{approval_db::v1::Config as DatabaseConfig, criteria, BlockEntry}; const DATA_COL: u32 = 0; - const SESSION_DATA_COL: u32 = 1; - const NUM_COLUMNS: u32 = 2; + const NUM_COLUMNS: u32 = 1; - const TEST_CONFIG: DatabaseConfig = - DatabaseConfig { col_approval_data: DATA_COL, col_session_data: SESSION_DATA_COL }; + const TEST_CONFIG: DatabaseConfig = DatabaseConfig { col_approval_data: DATA_COL }; #[derive(Default)] struct MockClock; diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 18b8746ca317..f5e888c7c538 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -116,8 +116,6 @@ const LOG_TARGET: &str = "parachain::approval-voting"; pub struct Config { /// The column family in the DB where approval-voting data is stored. pub col_approval_data: u32, - /// The of the DB where rolling session info is stored. - pub col_session_data: u32, /// The slot duration of the consensus algorithm, in milliseconds. Should be evenly /// divisible by 500. pub slot_duration_millis: u64, @@ -357,10 +355,7 @@ impl ApprovalVotingSubsystem { keystore, slot_duration_millis: config.slot_duration_millis, db, - db_config: DatabaseConfig { - col_approval_data: config.col_approval_data, - col_session_data: config.col_session_data, - }, + db_config: DatabaseConfig { col_approval_data: config.col_approval_data }, mode: Mode::Syncing(sync_oracle), metrics, } @@ -369,10 +364,8 @@ impl ApprovalVotingSubsystem { /// Revert to the block corresponding to the specified `hash`. /// The operation is not allowed for blocks older than the last finalized one. pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { - let config = approval_db::v1::Config { - col_approval_data: self.db_config.col_approval_data, - col_session_data: self.db_config.col_session_data, - }; + let config = + approval_db::v1::Config { col_approval_data: self.db_config.col_approval_data }; let mut backend = approval_db::v1::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs index d7e19a8c09f3..f58e60c6a487 100644 --- a/node/core/approval-voting/src/tests.rs +++ b/node/core/approval-voting/src/tests.rs @@ -14,8 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use crate::tests::test_constants::TEST_CONFIG; - use super::*; use polkadot_node_primitives::{ approval::{ @@ -115,12 +113,10 @@ fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHan pub mod test_constants { use crate::approval_db::v1::Config as DatabaseConfig; const DATA_COL: u32 = 0; - const SESSION_DATA_COL: u32 = 1; - pub(crate) const NUM_COLUMNS: u32 = 2; + pub(crate) const NUM_COLUMNS: u32 = 1; - pub(crate) const TEST_CONFIG: DatabaseConfig = - DatabaseConfig { col_approval_data: DATA_COL, col_session_data: SESSION_DATA_COL }; + pub(crate) const TEST_CONFIG: DatabaseConfig = DatabaseConfig { col_approval_data: DATA_COL }; } struct MockSupportsParachains; @@ -493,7 +489,6 @@ fn test_harness>( Config { col_approval_data: test_constants::TEST_CONFIG.col_approval_data, slot_duration_millis: SLOT_DURATION_MILLIS, - col_session_data: TEST_CONFIG.col_session_data, }, Arc::new(db), Arc::new(keystore), diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index aa67781ddd25..2d14f5151003 100644 --- a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -15,6 +15,12 @@ // along with Polkadot. If not, see . //! `V1` database for the dispute coordinator. +//! +//! Note that the version here differs from the actual version of the parachains +//! database (check `CURRENT_VERSION` in `node/service/src/parachains_db/upgrade.rs`). +//! The code in this module implements the way dispute coordinator works with +//! the dispute data in the database. Any breaking changes here will still +//! require a db migration (check `node/service/src/parachains_db/upgrade.rs`). use polkadot_node_primitives::DisputeStatus; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; @@ -206,8 +212,6 @@ fn candidate_votes_session_prefix(session: SessionIndex) -> [u8; 15 + 4] { pub struct ColumnConfiguration { /// The column in the key-value DB where data is stored. pub col_dispute_data: u32, - /// The column in the key-value DB where session data is stored. - pub col_session_data: u32, } /// Tracked votes on candidates, for the purposes of dispute resolution. 
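The dispute coordinator gets the same trimming; a minimal sketch of its reduced store configuration (mirroring the `lib.rs` and test changes that follow; the column index is illustrative):

// Sketch only, assuming `polkadot-node-core-dispute-coordinator` as a dependency.
use polkadot_node_core_dispute_coordinator::Config;

fn example_dispute_coordinator_config() -> Config {
    // Hypothetical column index; there is no separate session-data column any more.
    Config { col_dispute_data: 0 }
}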
@@ -378,7 +382,7 @@ mod tests { let db = kvdb_memorydb::create(1); let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]); let store = Arc::new(db); - let config = ColumnConfiguration { col_dispute_data: 0, col_session_data: 1 }; + let config = ColumnConfiguration { col_dispute_data: 0 }; DbBackend::new(store, config, Metrics::default()) } diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 81134a43a3a0..1b90a9d865e1 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -305,13 +305,12 @@ impl Initialized { Ok(session_idx) if self.gaps_in_cache || session_idx > self.highest_session_seen => { - // If error has occurred during last session caching - fetch the whole window - // Otherwise - cache only the new sessions - let lower_bound = if self.gaps_in_cache { - session_idx.saturating_sub(DISPUTE_WINDOW.get() - 1) - } else { - self.highest_session_seen + 1 - }; + // Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps in + // cache and we are not missing too many `SessionInfo`s + let mut lower_bound = session_idx.saturating_sub(DISPUTE_WINDOW.get() - 1); + if !self.gaps_in_cache && self.highest_session_seen > lower_bound { + lower_bound = self.highest_session_seen + 1 + } // There is a new session. Perform a dummy fetch to cache it. for idx in lower_bound..=session_idx { diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 7379b392f312..02bb6ef9ecda 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -127,16 +127,11 @@ pub struct DisputeCoordinatorSubsystem { pub struct Config { /// The data column in the store to use for dispute data. pub col_dispute_data: u32, - /// The data column in the store to use for session data. 
- pub col_session_data: u32, } impl Config { fn column_config(&self) -> db::v1::ColumnConfiguration { - db::v1::ColumnConfiguration { - col_dispute_data: self.col_dispute_data, - col_session_data: self.col_session_data, - } + db::v1::ColumnConfiguration { col_dispute_data: self.col_dispute_data } } } diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index 7d3b87f3c228..ceeac351e8b8 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -33,6 +33,7 @@ use polkadot_node_subsystem_util::database::Database; use polkadot_node_primitives::{ DisputeMessage, DisputeStatus, SignedDisputeStatement, SignedFullStatement, Statement, + DISPUTE_WINDOW, }; use polkadot_node_subsystem::{ messages::{ @@ -214,9 +215,9 @@ impl Default for TestState { make_keystore(vec![Sr25519Keyring::Alice.to_seed()].into_iter()).into(); let db = kvdb_memorydb::create(1); - let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]); let db = Arc::new(db); - let config = Config { col_dispute_data: 0, col_session_data: 1 }; + let config = Config { col_dispute_data: 0 }; let genesis_header = Header { parent_hash: Hash::zero(), @@ -330,9 +331,11 @@ impl TestState { assert_eq!(h, block_hash); let _ = tx.send(Ok(session)); + let first_expected_session = session.saturating_sub(DISPUTE_WINDOW.get() - 1); + // Queries for session caching - see `handle_startup` if self.known_session.is_none() { - for i in 0..=session { + for i in first_expected_session..=session { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -3393,3 +3396,174 @@ fn informs_chain_selection_when_dispute_concluded_against() { }) }); } + +// On startup `SessionInfo` cache should be populated +#[test] +fn session_info_caching_on_startup_works() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + test_state + }) + }); +} + +// Underflow means that no more than `DISPUTE_WINDOW` sessions should be fetched on startup +#[test] +fn session_info_caching_doesnt_underflow() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = DISPUTE_WINDOW.get() + 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + test_state + }) + }); +} + +// Cached `SessionInfo` shouldn't be re-requested from the runtime +#[test] +fn session_info_is_requested_only_once() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 3, + vec![make_candidate_included_event(make_valid_candidate_receipt())], + ) + .await; + + // This leaf activation should fetch `SessionInfo` because the session is new + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session + 1, + 4, + vec![make_candidate_included_event(make_valid_candidate_receipt())], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(session_index, tx), + )) => { + 
assert_eq!(session_index, 2); + let _ = tx.send(Ok(Some(test_state.session_info()))); + } + ); + test_state + }) + }); +} + +// Big jump means the new session we see with a leaf update is at least `DISPUTE_WINDOW` sessions bigger than +// the already known one. In this case the whole `DISPUTE_WINDOW` should be fetched. +#[test] +fn session_info_big_jump_works() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session_on_startup = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await; + + // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session_on_startup, + 3, + vec![make_candidate_included_event(make_valid_candidate_receipt())], + ) + .await; + + let session_after_jump = session_on_startup + DISPUTE_WINDOW.get() + 10; + // This leaf activation should cache all missing `SessionInfo`s + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session_after_jump, + 4, + vec![make_candidate_included_event(make_valid_candidate_receipt())], + ) + .await; + + let first_expected_session = + session_after_jump.saturating_sub(DISPUTE_WINDOW.get() - 1); + for expected_idx in first_expected_session..=session_after_jump { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(session_index, tx), + )) => { + assert_eq!(session_index, expected_idx); + let _ = tx.send(Ok(Some(test_state.session_info()))); + } + ); + } + test_state + }) + }); +} + +// Small jump means the new session we see with a leaf update is less than the last known one + `DISPUTE_WINDOW`. In this +// case fetching should start from the last known one + 1.
+#[test] +fn session_info_small_jump_works() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session_on_startup = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await; + + // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session_on_startup, + 3, + vec![make_candidate_included_event(make_valid_candidate_receipt())], + ) + .await; + + let session_after_jump = session_on_startup + DISPUTE_WINDOW.get() - 1; + // This leaf activation should cache all missing `SessionInfo`s + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session_after_jump, + 4, + vec![make_candidate_included_event(make_valid_candidate_receipt())], + ) + .await; + + let first_expected_session = session_on_startup + 1; + for expected_idx in first_expected_session..=session_after_jump { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(session_index, tx), + )) => { + assert_eq!(session_index, expected_idx); + let _ = tx.send(Ok(Some(test_state.session_info()))); + } + ); + } + test_state + }) + }); +} diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 026930758b86..d00c13fda2b0 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -4,6 +4,10 @@ version.workspace = true authors.workspace = true edition.workspace = true +[[bin]] +name = "puppet_worker" +path = "bin/puppet_worker.rs" + [dependencies] always-assert = "0.1" futures = "0.3.21" @@ -13,12 +17,16 @@ libc = "0.2.139" pin-project = "1.0.9" rand = "0.8.5" slotmap = "1.0" +tempfile = "3.3.0" tokio = { version = "1.24.2", features = ["fs", "process"] } parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } polkadot-parachain = { path = "../../../parachain" } polkadot-core-primitives = { path = "../../../core-primitives" } +polkadot-node-core-pvf-common = { path = "common" } +polkadot-node-core-pvf-execute-worker = { path = "execute-worker" } +polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } polkadot-primitives = { path = "../../../primitives" } @@ -34,4 +42,6 @@ substrate-build-script-utils = { git = "https://github.com/paritytech/substrate" [dev-dependencies] assert_matches = "1.4.0" hex-literal = "0.3.4" -tempfile = "3.3.0" + +adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } +halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } diff --git a/node/core/pvf/worker/bin/puppet_worker.rs b/node/core/pvf/bin/puppet_worker.rs similarity index 92% rename from node/core/pvf/worker/bin/puppet_worker.rs rename to node/core/pvf/bin/puppet_worker.rs index ddd81971292b..7f93519d8454 100644 --- a/node/core/pvf/worker/bin/puppet_worker.rs +++ b/node/core/pvf/bin/puppet_worker.rs @@ -14,4 +14,4 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-polkadot_node_core_pvf_worker::decl_puppet_worker_main!(); +polkadot_node_core_pvf::decl_puppet_worker_main!(); diff --git a/node/core/pvf/common/Cargo.toml b/node/core/pvf/common/Cargo.toml new file mode 100644 index 000000000000..de9fa10804c7 --- /dev/null +++ b/node/core/pvf/common/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "polkadot-node-core-pvf-common" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +cpu-time = "1.0.0" +futures = "0.3.21" +gum = { package = "tracing-gum", path = "../../../gum" } +libc = "0.2.139" +tokio = { version = "1.24.2", features = ["fs", "process", "io-util"] } + +parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } + +polkadot-parachain = { path = "../../../../parachain" } +polkadot-primitives = { path = "../../../../primitives" } + +sc-executor-common = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-executor-wasmtime = { git = "https://github.com/paritytech/substrate", branch = "master" } + +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[build-dependencies] +substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/pvf/worker/build.rs b/node/core/pvf/common/build.rs similarity index 100% rename from node/core/pvf/worker/build.rs rename to node/core/pvf/common/build.rs diff --git a/node/core/pvf/common/src/error.rs b/node/core/pvf/common/src/error.rs new file mode 100644 index 000000000000..56353c53b4d2 --- /dev/null +++ b/node/core/pvf/common/src/error.rs @@ -0,0 +1,106 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::prepare::PrepareStats; +use parity_scale_codec::{Decode, Encode}; +use std::fmt; + +/// Result of PVF preparation performed by the validation host. Contains stats about the preparation if +/// successful +pub type PrepareResult = Result; + +/// An error that occurred during the prepare part of the PVF pipeline. +#[derive(Debug, Clone, Encode, Decode)] +pub enum PrepareError { + /// During the prevalidation stage of preparation an issue was found with the PVF. + Prevalidation(String), + /// Compilation failed for the given PVF. + Preparation(String), + /// An unexpected panic has occurred in the preparation worker. + Panic(String), + /// Failed to prepare the PVF due to the time limit. + TimedOut, + /// An IO error occurred. This state is reported by either the validation host or by the worker. + IoErr(String), + /// The temporary file for the artifact could not be created at the given cache path. This state is reported by the + /// validation host (not by the worker). 
+ CreateTmpFileErr(String), + /// The response from the worker is received, but the file cannot be renamed (moved) to the final destination + /// location. This state is reported by the validation host (not by the worker). + RenameTmpFileErr(String), +} + +impl PrepareError { + /// Returns whether this is a deterministic error, i.e. one that should trigger reliably. Those + /// errors depend on the PVF itself and the sc-executor/wasmtime logic. + /// + /// Non-deterministic errors can happen spuriously. Typically, they occur due to resource + /// starvation, e.g. under heavy load or memory pressure. Those errors are typically transient + /// but may persist e.g. if the node is run by overwhelmingly underpowered machine. + pub fn is_deterministic(&self) -> bool { + use PrepareError::*; + match self { + Prevalidation(_) | Preparation(_) | Panic(_) => true, + TimedOut | IoErr(_) | CreateTmpFileErr(_) | RenameTmpFileErr(_) => false, + } + } +} + +impl fmt::Display for PrepareError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use PrepareError::*; + match self { + Prevalidation(err) => write!(f, "prevalidation: {}", err), + Preparation(err) => write!(f, "preparation: {}", err), + Panic(err) => write!(f, "panic: {}", err), + TimedOut => write!(f, "prepare: timeout"), + IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), + CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), + RenameTmpFileErr(err) => write!(f, "prepare: error renaming tmp file: {}", err), + } + } +} + +/// Some internal error occurred. +/// +/// Should only ever be used for validation errors independent of the candidate and PVF, or for errors we ruled out +/// during pre-checking (so preparation errors are fine). +#[derive(Debug, Clone, Encode, Decode)] +pub enum InternalValidationError { + /// Some communication error occurred with the host. + HostCommunication(String), + /// Could not find or open compiled artifact file. + CouldNotOpenFile(String), + /// An error occurred in the CPU time monitor thread. Should be totally unrelated to validation. + CpuTimeMonitorThread(String), + /// Some non-deterministic preparation error occurred. + NonDeterministicPrepareError(PrepareError), +} + +impl fmt::Display for InternalValidationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use InternalValidationError::*; + match self { + HostCommunication(err) => + write!(f, "validation: some communication error occurred with the host: {}", err), + CouldNotOpenFile(err) => + write!(f, "validation: could not find or open compiled artifact file: {}", err), + CpuTimeMonitorThread(err) => + write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), + NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), + } + } +} diff --git a/node/core/pvf/common/src/execute.rs b/node/core/pvf/common/src/execute.rs new file mode 100644 index 000000000000..de5ce39f7838 --- /dev/null +++ b/node/core/pvf/common/src/execute.rs @@ -0,0 +1,60 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::error::InternalValidationError; +use parity_scale_codec::{Decode, Encode}; +use polkadot_parachain::primitives::ValidationResult; +use polkadot_primitives::ExecutorParams; +use std::time::Duration; + +/// The payload of the one-time handshake that is done when a worker process is created. Carries +/// data from the host to the worker. +#[derive(Encode, Decode)] +pub struct Handshake { + /// The executor parameters. + pub executor_params: ExecutorParams, +} + +/// The response from an execution job on the worker. +#[derive(Encode, Decode)] +pub enum Response { + /// The job completed successfully. + Ok { + /// The result of parachain validation. + result_descriptor: ValidationResult, + /// The amount of CPU time taken by the job. + duration: Duration, + }, + /// The candidate is invalid. + InvalidCandidate(String), + /// The job timed out. + TimedOut, + /// An unexpected panic has occurred in the execution worker. + Panic(String), + /// Some internal error occurred. + InternalError(InternalValidationError), +} + +impl Response { + /// Creates an invalid response from a context `ctx` and a message `msg` (which can be empty). + pub fn format_invalid(ctx: &'static str, msg: &str) -> Self { + if msg.is_empty() { + Self::InvalidCandidate(ctx.to_string()) + } else { + Self::InvalidCandidate(format!("{}: {}", ctx, msg)) + } + } +} diff --git a/node/core/pvf/common/src/executor_intf.rs b/node/core/pvf/common/src/executor_intf.rs new file mode 100644 index 000000000000..5926f3c5dbc7 --- /dev/null +++ b/node/core/pvf/common/src/executor_intf.rs @@ -0,0 +1,114 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Interface to the Substrate Executor + +use polkadot_primitives::{ExecutorParam, ExecutorParams}; +use sc_executor_common::wasm_runtime::HeapAllocStrategy; +use sc_executor_wasmtime::{Config, DeterministicStackLimit, Semantics}; + +// Memory configuration +// +// When Substrate Runtime is instantiated, a number of WASM pages are allocated for the Substrate +// Runtime instance's linear memory. The exact number of pages is a sum of whatever the WASM blob +// itself requests (by default at least enough to hold the data section as well as have some space +// left for the stack; this is, of course, overridable at link time when compiling the runtime) +// plus the number of pages specified in the `extra_heap_pages` passed to the executor. +// +// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 pages. 
+// The data section for runtimes are typically rather small and can fit in a single digit number of +// WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB are used for +// these needs by default. +const DEFAULT_HEAP_PAGES_ESTIMATE: u32 = 32; +const EXTRA_HEAP_PAGES: u32 = 2048; + +/// The number of bytes devoted for the stack during wasm execution of a PVF. +pub const NATIVE_STACK_MAX: u32 = 256 * 1024 * 1024; + +// VALUES OF THE DEFAULT CONFIGURATION SHOULD NEVER BE CHANGED +// They are used as base values for the execution environment parametrization. +// To overwrite them, add new ones to `EXECUTOR_PARAMS` in the `session_info` pallet and perform +// a runtime upgrade to make them active. +pub const DEFAULT_CONFIG: Config = Config { + allow_missing_func_imports: true, + cache_path: None, + semantics: Semantics { + heap_alloc_strategy: sc_executor_common::wasm_runtime::HeapAllocStrategy::Dynamic { + maximum_pages: Some(DEFAULT_HEAP_PAGES_ESTIMATE + EXTRA_HEAP_PAGES), + }, + + instantiation_strategy: + sc_executor_wasmtime::InstantiationStrategy::RecreateInstanceCopyOnWrite, + + // Enable deterministic stack limit to pin down the exact number of items the wasmtime stack + // can contain before it traps with stack overflow. + // + // Here is how the values below were chosen. + // + // At the moment of writing, the default native stack size limit is 1 MiB. Assuming a logical item + // (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can + // fit 2x 65536 logical items. + // + // Since reaching the native stack limit is undesirable, we halve the logical item limit and + // also increase the native 256x. This hopefully should preclude wasm code from reaching + // the stack limit set by the wasmtime. + deterministic_stack_limit: Some(DeterministicStackLimit { + logical_max: 65536, + native_stack_max: NATIVE_STACK_MAX, + }), + canonicalize_nans: true, + // Rationale for turning the multi-threaded compilation off is to make the preparation time + // easily reproducible and as deterministic as possible. + // + // Currently the prepare queue doesn't distinguish between precheck and prepare requests. + // On the one hand, it simplifies the code, on the other, however, slows down compile times + // for execute requests. This behavior may change in future. + parallel_compilation: false, + + // WASM extensions. Only those that are meaningful to us may be controlled here. By default, + // we're using WASM MVP, which means all the extensions are disabled. Nevertheless, some + // extensions (e.g., sign extension ops) are enabled by Wasmtime and cannot be disabled. + wasm_reference_types: false, + wasm_simd: false, + wasm_bulk_memory: false, + wasm_multi_value: false, + }, +}; + +pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result { + let mut sem = DEFAULT_CONFIG.semantics.clone(); + let mut stack_limit = if let Some(stack_limit) = sem.deterministic_stack_limit.clone() { + stack_limit + } else { + return Err("No default stack limit set".to_owned()) + }; + + for p in par.iter() { + match p { + ExecutorParam::MaxMemoryPages(max_pages) => + sem.heap_alloc_strategy = + HeapAllocStrategy::Dynamic { maximum_pages: Some(*max_pages) }, + ExecutorParam::StackLogicalMax(slm) => stack_limit.logical_max = *slm, + ExecutorParam::StackNativeMax(snm) => stack_limit.native_stack_max = *snm, + ExecutorParam::WasmExtBulkMemory => sem.wasm_bulk_memory = true, + // TODO: Not implemented yet; . 
+ ExecutorParam::PrecheckingMaxMemory(_) => (), + ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), // Not used here + } + } + sem.deterministic_stack_limit = Some(stack_limit); + Ok(sem) +} diff --git a/node/core/pvf/common/src/lib.rs b/node/core/pvf/common/src/lib.rs new file mode 100644 index 000000000000..028fd9b17947 --- /dev/null +++ b/node/core/pvf/common/src/lib.rs @@ -0,0 +1,57 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Functionality that is shared by the host and the workers. + +pub mod error; +pub mod execute; +pub mod executor_intf; +pub mod prepare; +pub mod pvf; +pub mod worker; + +pub use cpu_time::ProcessTime; + +const LOG_TARGET: &str = "parachain::pvf-common"; + +use std::mem; +use tokio::io::{self, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; + +#[doc(hidden)] +pub mod tests { + use std::time::Duration; + + pub const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); + pub const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); +} + +/// Write some data prefixed by its length into `w`. +pub async fn framed_send(w: &mut (impl AsyncWrite + Unpin), buf: &[u8]) -> io::Result<()> { + let len_buf = buf.len().to_le_bytes(); + w.write_all(&len_buf).await?; + w.write_all(buf).await?; + Ok(()) +} + +/// Read some data prefixed by its length from `r`. +pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result> { + let mut len_buf = [0u8; mem::size_of::()]; + r.read_exact(&mut len_buf).await?; + let len = usize::from_le_bytes(len_buf); + let mut buf = vec![0; len]; + r.read_exact(&mut buf).await?; + Ok(buf) +} diff --git a/node/core/pvf/common/src/prepare.rs b/node/core/pvf/common/src/prepare.rs new file mode 100644 index 000000000000..ac64e2927a16 --- /dev/null +++ b/node/core/pvf/common/src/prepare.rs @@ -0,0 +1,48 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use parity_scale_codec::{Decode, Encode}; + +/// Preparation statistics, including the CPU time and memory taken. +#[derive(Debug, Clone, Default, Encode, Decode)] +pub struct PrepareStats { + /// The CPU time that elapsed for the preparation job. 
+ pub cpu_time_elapsed: std::time::Duration, + /// The observed memory statistics for the preparation job. + pub memory_stats: MemoryStats, +} + +/// Helper struct to contain all the memory stats, including `MemoryAllocationStats` and, if +/// supported by the OS, `ru_maxrss`. +#[derive(Clone, Debug, Default, Encode, Decode)] +pub struct MemoryStats { + /// Memory stats from `tikv_jemalloc_ctl`. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + pub memory_tracker_stats: Option, + /// `ru_maxrss` from `getrusage`. `None` if an error occurred. + #[cfg(target_os = "linux")] + pub max_rss: Option, +} + +/// Statistics of collected memory metrics. +#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] +#[derive(Clone, Debug, Default, Encode, Decode)] +pub struct MemoryAllocationStats { + /// Total resident memory, in bytes. + pub resident: u64, + /// Total allocated memory, in bytes. + pub allocated: u64, +} diff --git a/node/core/pvf/src/pvf.rs b/node/core/pvf/common/src/pvf.rs similarity index 81% rename from node/core/pvf/src/pvf.rs rename to node/core/pvf/common/src/pvf.rs index c134cacb4acf..1661f324083a 100644 --- a/node/core/pvf/src/pvf.rs +++ b/node/core/pvf/common/src/pvf.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::artifacts::ArtifactId; use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParams; @@ -26,9 +25,6 @@ use std::{ time::Duration, }; -#[cfg(test)] -use crate::host::tests::TEST_PREPARATION_TIMEOUT; - /// A struct that carries the exhaustive set of data to prepare an artifact out of plain /// Wasm binary /// @@ -58,13 +54,8 @@ impl PvfPrepData { Self { code, code_hash, executor_params, prep_timeout } } - /// Returns artifact ID that corresponds to the PVF with given executor params - pub(crate) fn as_artifact_id(&self) -> ArtifactId { - ArtifactId::new(self.code_hash, self.executor_params.hash()) - } - /// Returns validation code hash for the PVF - pub(crate) fn code_hash(&self) -> ValidationCodeHash { + pub fn code_hash(&self) -> ValidationCodeHash { self.code_hash } @@ -83,16 +74,17 @@ impl PvfPrepData { self.prep_timeout } - /// Creates a structure for tests - #[cfg(test)] - pub(crate) fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { + /// Creates a structure for tests. + #[doc(hidden)] + pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { let descriminator_buf = num.to_le_bytes().to_vec(); Self::from_code(descriminator_buf, ExecutorParams::default(), timeout) } - #[cfg(test)] - pub(crate) fn from_discriminator(num: u32) -> Self { - Self::from_discriminator_and_timeout(num, TEST_PREPARATION_TIMEOUT) + /// Creates a structure for tests. + #[doc(hidden)] + pub fn from_discriminator(num: u32) -> Self { + Self::from_discriminator_and_timeout(num, crate::tests::TEST_PREPARATION_TIMEOUT) } } diff --git a/node/core/pvf/worker/src/common.rs b/node/core/pvf/common/src/worker.rs similarity index 90% rename from node/core/pvf/worker/src/common.rs rename to node/core/pvf/common/src/worker.rs index 00289737a5c8..debe18985b37 100644 --- a/node/core/pvf/worker/src/common.rs +++ b/node/core/pvf/common/src/worker.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +//! Functionality common to both prepare and execute workers. 
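Note on the `framed_send`/`framed_recv` helpers added to `common/src/lib.rs` above: the wire format is a little-endian `usize` length prefix followed by the payload bytes. The sketch below is illustrative only and not part of the change; it reproduces that framing inline so it is self-contained, and uses `tokio::io::duplex` (tokio with the `io-util`, `rt`, and `macros` features) as a stand-in for the host/worker `UnixStream`.

```rust
// Sketch of the length-prefixed framing: LE `usize` length, then payload.
// `tokio::io::duplex` is only a stand-in for the host<->worker `UnixStream`.
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (mut host_end, mut worker_end) = tokio::io::duplex(1024);

    // Send side (what `framed_send` does): length prefix, then the bytes.
    let payload = b"handshake".to_vec();
    host_end.write_all(&payload.len().to_le_bytes()).await?;
    host_end.write_all(&payload).await?;

    // Receive side (what `framed_recv` does): read the prefix, then exactly
    // `len` bytes of payload.
    let mut len_buf = [0u8; std::mem::size_of::<usize>()];
    worker_end.read_exact(&mut len_buf).await?;
    let len = usize::from_le_bytes(len_buf);
    let mut buf = vec![0u8; len];
    worker_end.read_exact(&mut buf).await?;

    assert_eq!(buf, payload);
    Ok(())
}
```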
+ use crate::LOG_TARGET; use cpu_time::ProcessTime; use futures::never::Never; @@ -25,6 +27,42 @@ use std::{ }; use tokio::{io, net::UnixStream, runtime::Runtime}; +/// Use this macro to declare a `fn main() {}` that will create an executable that can be used for +/// spawning the desired worker. +#[macro_export] +macro_rules! decl_worker_main { + ($expected_command:expr, $entrypoint:expr) => { + fn main() { + ::sp_tracing::try_init_simple(); + + let args = std::env::args().collect::>(); + if args.len() < 3 { + panic!("wrong number of arguments"); + } + + let mut version = None; + let mut socket_path: &str = ""; + + for i in 2..args.len() { + match args[i].as_ref() { + "--socket-path" => socket_path = args[i + 1].as_str(), + "--node-version" => version = Some(args[i + 1].as_str()), + _ => (), + } + } + + let subcommand = &args[1]; + if subcommand != $expected_command { + panic!( + "trying to run {} binary with the {} subcommand", + $expected_command, subcommand + ) + } + $entrypoint(&socket_path, version); + } + }; +} + /// Some allowed overhead that we account for in the "CPU time monitor" thread's sleeps, on the /// child process. pub const JOB_TIMEOUT_OVERHEAD: Duration = Duration::from_millis(50); diff --git a/node/core/pvf/worker/Cargo.toml b/node/core/pvf/execute-worker/Cargo.toml similarity index 67% rename from node/core/pvf/worker/Cargo.toml rename to node/core/pvf/execute-worker/Cargo.toml index 53d548dbac6f..c360cee8bf5d 100644 --- a/node/core/pvf/worker/Cargo.toml +++ b/node/core/pvf/execute-worker/Cargo.toml @@ -1,27 +1,20 @@ [package] -name = "polkadot-node-core-pvf-worker" +name = "polkadot-node-core-pvf-execute-worker" version.workspace = true authors.workspace = true edition.workspace = true -[[bin]] -name = "puppet_worker" -path = "bin/puppet_worker.rs" - [dependencies] -assert_matches = "1.4.0" cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } -libc = "0.2.139" rayon = "1.5.1" -tempfile = "3.3.0" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } -tokio = "1.24.2" +tokio = { version = "1.24.2", features = ["fs", "process"] } parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } -polkadot-node-core-pvf = { path = ".." } +polkadot-node-core-pvf-common = { path = "../common" } polkadot-parachain = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } @@ -37,12 +30,5 @@ sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master [target.'cfg(target_os = "linux")'.dependencies] tikv-jemalloc-ctl = "0.5.0" -[build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dev-dependencies] -adder = { package = "test-parachain-adder", path = "../../../../parachain/test-parachains/adder" } -halt = { package = "test-parachain-halt", path = "../../../../parachain/test-parachains/halt" } - [features] -jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] +builder = [] diff --git a/node/core/pvf/worker/src/executor_intf.rs b/node/core/pvf/execute-worker/src/executor_intf.rs similarity index 65% rename from node/core/pvf/worker/src/executor_intf.rs rename to node/core/pvf/execute-worker/src/executor_intf.rs index ff286dd74d64..98424a3dcd1d 100644 --- a/node/core/pvf/worker/src/executor_intf.rs +++ b/node/core/pvf/execute-worker/src/executor_intf.rs @@ -16,13 +16,16 @@ //! 
Interface to the Substrate Executor -use polkadot_primitives::{ExecutorParam, ExecutorParams}; +use polkadot_node_core_pvf_common::executor_intf::{ + params_to_wasmtime_semantics, DEFAULT_CONFIG, NATIVE_STACK_MAX, +}; +use polkadot_primitives::ExecutorParams; use sc_executor_common::{ error::WasmError, runtime_blob::RuntimeBlob, - wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmModule as _}, + wasm_runtime::{InvokeMethod, WasmModule as _}, }; -use sc_executor_wasmtime::{Config, DeterministicStackLimit, Semantics, WasmtimeRuntime}; +use sc_executor_wasmtime::{Config, WasmtimeRuntime}; use sp_core::storage::{ChildInfo, TrackedStorageKey}; use sp_externalities::MultiRemovalResults; use std::any::{Any, TypeId}; @@ -63,119 +66,6 @@ use std::any::{Any, TypeId}; /// The stack size for the execute thread. pub const EXECUTE_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024 + NATIVE_STACK_MAX as usize; -// Memory configuration -// -// When Substrate Runtime is instantiated, a number of WASM pages are allocated for the Substrate -// Runtime instance's linear memory. The exact number of pages is a sum of whatever the WASM blob -// itself requests (by default at least enough to hold the data section as well as have some space -// left for the stack; this is, of course, overridable at link time when compiling the runtime) -// plus the number of pages specified in the `extra_heap_pages` passed to the executor. -// -// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 pages. -// The data section for runtimes are typically rather small and can fit in a single digit number of -// WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB are used for -// these needs by default. -const DEFAULT_HEAP_PAGES_ESTIMATE: u32 = 32; -const EXTRA_HEAP_PAGES: u32 = 2048; - -/// The number of bytes devoted for the stack during wasm execution of a PVF. -const NATIVE_STACK_MAX: u32 = 256 * 1024 * 1024; - -// VALUES OF THE DEFAULT CONFIGURATION SHOULD NEVER BE CHANGED -// They are used as base values for the execution environment parametrization. -// To overwrite them, add new ones to `EXECUTOR_PARAMS` in the `session_info` pallet and perform -// a runtime upgrade to make them active. -const DEFAULT_CONFIG: Config = Config { - allow_missing_func_imports: true, - cache_path: None, - semantics: Semantics { - heap_alloc_strategy: sc_executor_common::wasm_runtime::HeapAllocStrategy::Dynamic { - maximum_pages: Some(DEFAULT_HEAP_PAGES_ESTIMATE + EXTRA_HEAP_PAGES), - }, - - instantiation_strategy: - sc_executor_wasmtime::InstantiationStrategy::RecreateInstanceCopyOnWrite, - - // Enable deterministic stack limit to pin down the exact number of items the wasmtime stack - // can contain before it traps with stack overflow. - // - // Here is how the values below were chosen. - // - // At the moment of writing, the default native stack size limit is 1 MiB. Assuming a logical item - // (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can - // fit 2x 65536 logical items. - // - // Since reaching the native stack limit is undesirable, we halve the logical item limit and - // also increase the native 256x. This hopefully should preclude wasm code from reaching - // the stack limit set by the wasmtime. 
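Quick sanity check of the numbers in the deterministic-stack-limit comment being moved here (and re-added in `common/src/executor_intf.rs` above), under the comment's own assumption of 8 bytes per logical wasm stack item. Purely illustrative arithmetic, not part of the change.

```rust
// Arithmetic behind the deterministic-stack-limit comment, assuming 8 bytes
// per logical wasm stack item as the comment does.
fn main() {
    const LOGICAL_MAX: u64 = 65_536; // `logical_max` in the config
    const BYTES_PER_ITEM: u64 = 8; // assumption stated in the comment
    const DEFAULT_NATIVE_STACK: u64 = 1024 * 1024; // 1 MiB rustc/lld default
    const NATIVE_STACK_MAX: u64 = 256 * 1024 * 1024; // 256 MiB, as configured

    // 1 MiB holds 2 * 65_536 logical items; the config halves that...
    assert_eq!(DEFAULT_NATIVE_STACK / BYTES_PER_ITEM, 2 * LOGICAL_MAX);
    // ...and raises the native stack 256x over the 1 MiB default.
    assert_eq!(NATIVE_STACK_MAX / DEFAULT_NATIVE_STACK, 256);
}
```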
- deterministic_stack_limit: Some(DeterministicStackLimit { - logical_max: 65536, - native_stack_max: NATIVE_STACK_MAX, - }), - canonicalize_nans: true, - // Rationale for turning the multi-threaded compilation off is to make the preparation time - // easily reproducible and as deterministic as possible. - // - // Currently the prepare queue doesn't distinguish between precheck and prepare requests. - // On the one hand, it simplifies the code, on the other, however, slows down compile times - // for execute requests. This behavior may change in future. - parallel_compilation: false, - - // WASM extensions. Only those that are meaningful to us may be controlled here. By default, - // we're using WASM MVP, which means all the extensions are disabled. Nevertheless, some - // extensions (e.g., sign extension ops) are enabled by Wasmtime and cannot be disabled. - wasm_reference_types: false, - wasm_simd: false, - wasm_bulk_memory: false, - wasm_multi_value: false, - }, -}; - -/// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds. -pub fn prevalidate(code: &[u8]) -> Result { - let blob = RuntimeBlob::new(code)?; - // It's assumed this function will take care of any prevalidation logic - // that needs to be done. - // - // Do nothing for now. - Ok(blob) -} - -/// Runs preparation on the given runtime blob. If successful, it returns a serialized compiled -/// artifact which can then be used to pass into `Executor::execute` after writing it to the disk. -pub fn prepare( - blob: RuntimeBlob, - executor_params: &ExecutorParams, -) -> Result, sc_executor_common::error::WasmError> { - let semantics = params_to_wasmtime_semantics(executor_params) - .map_err(|e| sc_executor_common::error::WasmError::Other(e))?; - sc_executor_wasmtime::prepare_runtime_artifact(blob, &semantics) -} - -fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result { - let mut sem = DEFAULT_CONFIG.semantics.clone(); - let mut stack_limit = if let Some(stack_limit) = sem.deterministic_stack_limit.clone() { - stack_limit - } else { - return Err("No default stack limit set".to_owned()) - }; - - for p in par.iter() { - match p { - ExecutorParam::MaxMemoryPages(max_pages) => - sem.heap_alloc_strategy = - HeapAllocStrategy::Dynamic { maximum_pages: Some(*max_pages) }, - ExecutorParam::StackLogicalMax(slm) => stack_limit.logical_max = *slm, - ExecutorParam::StackNativeMax(snm) => stack_limit.native_stack_max = *snm, - ExecutorParam::WasmExtBulkMemory => sem.wasm_bulk_memory = true, - ExecutorParam::PrecheckingMaxMemory(_) => (), // TODO: Not implemented yet - ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), // Not used here - } - } - sem.deterministic_stack_limit = Some(stack_limit); - Ok(sem) -} - #[derive(Clone)] pub struct Executor { config: Config, diff --git a/node/core/pvf/worker/src/execute.rs b/node/core/pvf/execute-worker/src/lib.rs similarity index 93% rename from node/core/pvf/worker/src/execute.rs rename to node/core/pvf/execute-worker/src/lib.rs index c5b8ddc9dd18..0ac39aafb0c9 100644 --- a/node/core/pvf/worker/src/execute.rs +++ b/node/core/pvf/execute-worker/src/lib.rs @@ -14,20 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::{ - common::{ +mod executor_intf; + +pub use executor_intf::Executor; + +// NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are +// separate spawned processes. Run with e.g. 
`RUST_LOG=parachain::pvf-execute-worker=trace`. +const LOG_TARGET: &str = "parachain::pvf-execute-worker"; + +use crate::executor_intf::EXECUTE_THREAD_STACK_SIZE; +use cpu_time::ProcessTime; +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_core_pvf_common::{ + error::InternalValidationError, + execute::{Handshake, Response}, + framed_recv, framed_send, + worker::{ bytes_to_path, cpu_time_monitor_loop, stringify_panic_payload, thread::{self, WaitOutcome}, worker_event_loop, }, - executor_intf::{Executor, EXECUTE_THREAD_STACK_SIZE}, - LOG_TARGET, -}; -use cpu_time::ProcessTime; -use parity_scale_codec::{Decode, Encode}; -use polkadot_node_core_pvf::{ - framed_recv, framed_send, ExecuteHandshake as Handshake, ExecuteResponse as Response, - InternalValidationError, }; use polkadot_parachain::primitives::ValidationResult; use std::{ diff --git a/node/core/pvf/prepare-worker/Cargo.toml b/node/core/pvf/prepare-worker/Cargo.toml new file mode 100644 index 000000000000..07386de35962 --- /dev/null +++ b/node/core/pvf/prepare-worker/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "polkadot-node-core-pvf-prepare-worker" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +futures = "0.3.21" +gum = { package = "tracing-gum", path = "../../../gum" } +libc = "0.2.139" +rayon = "1.5.1" +tikv-jemalloc-ctl = { version = "0.5.0", optional = true } +tokio = { version = "1.24.2", features = ["fs", "process"] } + +parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive"] } + +polkadot-node-core-pvf-common = { path = "../common" } +polkadot-parachain = { path = "../../../../parachain" } +polkadot-primitives = { path = "../../../../primitives" } + +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-executor-common = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-executor-wasmtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[target.'cfg(target_os = "linux")'.dependencies] +tikv-jemalloc-ctl = "0.5.0" + +[features] +builder = [] +jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] diff --git a/node/core/pvf/prepare-worker/src/executor_intf.rs b/node/core/pvf/prepare-worker/src/executor_intf.rs new file mode 100644 index 000000000000..1f88f6a6dd6e --- /dev/null +++ b/node/core/pvf/prepare-worker/src/executor_intf.rs @@ -0,0 +1,42 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Interface to the Substrate Executor + +use polkadot_node_core_pvf_common::executor_intf::params_to_wasmtime_semantics; +use polkadot_primitives::ExecutorParams; +use sc_executor_common::runtime_blob::RuntimeBlob; + +/// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds. +pub fn prevalidate(code: &[u8]) -> Result { + let blob = RuntimeBlob::new(code)?; + // It's assumed this function will take care of any prevalidation logic + // that needs to be done. + // + // Do nothing for now. + Ok(blob) +} + +/// Runs preparation on the given runtime blob. If successful, it returns a serialized compiled +/// artifact which can then be used to pass into `Executor::execute` after writing it to the disk. +pub fn prepare( + blob: RuntimeBlob, + executor_params: &ExecutorParams, +) -> Result, sc_executor_common::error::WasmError> { + let semantics = params_to_wasmtime_semantics(executor_params) + .map_err(|e| sc_executor_common::error::WasmError::Other(e))?; + sc_executor_wasmtime::prepare_runtime_artifact(blob, &semantics) +} diff --git a/node/core/pvf/worker/src/prepare.rs b/node/core/pvf/prepare-worker/src/lib.rs similarity index 90% rename from node/core/pvf/worker/src/prepare.rs rename to node/core/pvf/prepare-worker/src/lib.rs index fe9c1a85545a..8f36ef397cfb 100644 --- a/node/core/pvf/worker/src/prepare.rs +++ b/node/core/pvf/prepare-worker/src/lib.rs @@ -14,23 +14,31 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +mod executor_intf; +mod memory_stats; + +pub use executor_intf::{prepare, prevalidate}; + +// NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are +// separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-prepare-worker=trace`. +const LOG_TARGET: &str = "parachain::pvf-prepare-worker"; + #[cfg(target_os = "linux")] use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; -use crate::{ - common::{ +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_core_pvf_common::{ + error::{PrepareError, PrepareResult}, + framed_recv, framed_send, + prepare::{MemoryStats, PrepareStats}, + pvf::PvfPrepData, + worker::{ bytes_to_path, cpu_time_monitor_loop, stringify_panic_payload, thread::{self, WaitOutcome}, worker_event_loop, }, - prepare, prevalidate, LOG_TARGET, -}; -use cpu_time::ProcessTime; -use parity_scale_codec::{Decode, Encode}; -use polkadot_node_core_pvf::{ - framed_recv, framed_send, CompiledArtifact, MemoryStats, PrepareError, PrepareResult, - PrepareStats, PvfPrepData, + ProcessTime, }; use std::{ path::PathBuf, @@ -39,6 +47,22 @@ use std::{ }; use tokio::{io, net::UnixStream}; +/// Contains the bytes for a successfully compiled artifact. +pub struct CompiledArtifact(Vec); + +impl CompiledArtifact { + /// Creates a `CompiledArtifact`. 
+ pub fn new(code: Vec) -> Self { + Self(code) + } +} + +impl AsRef<[u8]> for CompiledArtifact { + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } +} + async fn recv_request(stream: &mut UnixStream) -> io::Result<(PvfPrepData, PathBuf)> { let pvf = framed_recv(stream).await?; let pvf = PvfPrepData::decode(&mut &pvf[..]).map_err(|e| { diff --git a/node/core/pvf/worker/src/memory_stats.rs b/node/core/pvf/prepare-worker/src/memory_stats.rs similarity index 97% rename from node/core/pvf/worker/src/memory_stats.rs rename to node/core/pvf/prepare-worker/src/memory_stats.rs index 907f793d87af..e6dc8572c4a3 100644 --- a/node/core/pvf/worker/src/memory_stats.rs +++ b/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -33,11 +33,11 @@ /// NOTE: Requires jemalloc enabled. #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] pub mod memory_tracker { - use crate::{ - common::{stringify_panic_payload, thread}, - LOG_TARGET, + use crate::LOG_TARGET; + use polkadot_node_core_pvf_common::{ + prepare::MemoryAllocationStats, + worker::{stringify_panic_payload, thread}, }; - use polkadot_node_core_pvf::MemoryAllocationStats; use std::{thread::JoinHandle, time::Duration}; use tikv_jemalloc_ctl::{epoch, stats, Error}; diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs index d5a660cc3aa5..78d2f88941b8 100644 --- a/node/core/pvf/src/artifacts.rs +++ b/node/core/pvf/src/artifacts.rs @@ -55,8 +55,9 @@ //! older by a predefined parameter. This process is run very rarely (say, once a day). Once the //! artifact is expired it is removed from disk eagerly atomically. -use crate::{error::PrepareError, host::PrepareResultSender, prepare::PrepareStats}; +use crate::host::PrepareResultSender; use always_assert::always; +use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; use polkadot_parachain::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParamsHash; use std::{ @@ -65,22 +66,6 @@ use std::{ time::{Duration, SystemTime}, }; -/// Contains the bytes for a successfully compiled artifact. -pub struct CompiledArtifact(Vec); - -impl CompiledArtifact { - /// Creates a `CompiledArtifact`. - pub fn new(code: Vec) -> Self { - Self(code) - } -} - -impl AsRef<[u8]> for CompiledArtifact { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } -} - /// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { @@ -96,6 +81,11 @@ impl ArtifactId { Self { code_hash, executor_params_hash } } + /// Returns an artifact ID that corresponds to the PVF with given executor params. + pub fn from_pvf_prep_data(pvf: &PvfPrepData) -> Self { + Self::new(pvf.code_hash(), pvf.executor_params().hash()) + } + /// Tries to recover the artifact id from the given file name. 
#[cfg(test)] pub fn from_file_name(file_name: &str) -> Option { @@ -304,7 +294,7 @@ mod tests { #[tokio::test] async fn artifacts_removes_cache_on_startup() { - let fake_cache_path = crate::worker_common::tmpfile("test-cache").await.unwrap(); + let fake_cache_path = crate::worker_intf::tmpfile("test-cache").await.unwrap(); let fake_artifact_path = { let mut p = fake_cache_path.clone(); p.push("wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234"); diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs index 33f3f00810f2..7372cd233c49 100644 --- a/node/core/pvf/src/error.rs +++ b/node/core/pvf/src/error.rs @@ -14,65 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::prepare::PrepareStats; -use parity_scale_codec::{Decode, Encode}; -use std::fmt; - -/// Result of PVF preparation performed by the validation host. Contains stats about the preparation if -/// successful -pub type PrepareResult = Result; - -/// An error that occurred during the prepare part of the PVF pipeline. -#[derive(Debug, Clone, Encode, Decode)] -pub enum PrepareError { - /// During the prevalidation stage of preparation an issue was found with the PVF. - Prevalidation(String), - /// Compilation failed for the given PVF. - Preparation(String), - /// An unexpected panic has occurred in the preparation worker. - Panic(String), - /// Failed to prepare the PVF due to the time limit. - TimedOut, - /// An IO error occurred. This state is reported by either the validation host or by the worker. - IoErr(String), - /// The temporary file for the artifact could not be created at the given cache path. This state is reported by the - /// validation host (not by the worker). - CreateTmpFileErr(String), - /// The response from the worker is received, but the file cannot be renamed (moved) to the final destination - /// location. This state is reported by the validation host (not by the worker). - RenameTmpFileErr(String), -} - -impl PrepareError { - /// Returns whether this is a deterministic error, i.e. one that should trigger reliably. Those - /// errors depend on the PVF itself and the sc-executor/wasmtime logic. - /// - /// Non-deterministic errors can happen spuriously. Typically, they occur due to resource - /// starvation, e.g. under heavy load or memory pressure. Those errors are typically transient - /// but may persist e.g. if the node is run by overwhelmingly underpowered machine. - pub fn is_deterministic(&self) -> bool { - use PrepareError::*; - match self { - Prevalidation(_) | Preparation(_) | Panic(_) => true, - TimedOut | IoErr(_) | CreateTmpFileErr(_) | RenameTmpFileErr(_) => false, - } - } -} - -impl fmt::Display for PrepareError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use PrepareError::*; - match self { - Prevalidation(err) => write!(f, "prevalidation: {}", err), - Preparation(err) => write!(f, "preparation: {}", err), - Panic(err) => write!(f, "panic: {}", err), - TimedOut => write!(f, "prepare: timeout"), - IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), - CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), - RenameTmpFileErr(err) => write!(f, "prepare: error renaming tmp file: {}", err), - } - } -} +use polkadot_node_core_pvf_common::error::{InternalValidationError, PrepareError}; /// A error raised during validation of the candidate. 
#[derive(Debug, Clone)] @@ -122,37 +64,6 @@ pub enum InvalidCandidate { Panic(String), } -/// Some internal error occurred. -/// -/// Should only ever be used for validation errors independent of the candidate and PVF, or for errors we ruled out -/// during pre-checking (so preparation errors are fine). -#[derive(Debug, Clone, Encode, Decode)] -pub enum InternalValidationError { - /// Some communication error occurred with the host. - HostCommunication(String), - /// Could not find or open compiled artifact file. - CouldNotOpenFile(String), - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to validation. - CpuTimeMonitorThread(String), - /// Some non-deterministic preparation error occurred. - NonDeterministicPrepareError(PrepareError), -} - -impl fmt::Display for InternalValidationError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use InternalValidationError::*; - match self { - HostCommunication(err) => - write!(f, "validation: some communication error occurred with the host: {}", err), - CouldNotOpenFile(err) => - write!(f, "validation: could not find or open compiled artifact file: {}", err), - CpuTimeMonitorThread(err) => - write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), - NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), - } - } -} - impl From for ValidationError { fn from(error: InternalValidationError) -> Self { Self::InternalError(error) diff --git a/node/core/pvf/src/execute/mod.rs b/node/core/pvf/src/execute/mod.rs index 8e3b17d71569..669b9dc04d7c 100644 --- a/node/core/pvf/src/execute/mod.rs +++ b/node/core/pvf/src/execute/mod.rs @@ -24,4 +24,3 @@ mod queue; mod worker_intf; pub use queue::{start, PendingExecutionRequest, ToQueue}; -pub use worker_intf::{Handshake as ExecuteHandshake, Response as ExecuteResponse}; diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index 61cebc5e2c46..395697616b36 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -21,7 +21,7 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId}, host::ResultSender, metrics::Metrics, - worker_common::{IdleWorker, WorkerHandle}, + worker_intf::{IdleWorker, WorkerHandle}, InvalidCandidate, ValidationError, LOG_TARGET, }; use futures::{ diff --git a/node/core/pvf/src/execute/worker_intf.rs b/node/core/pvf/src/execute/worker_intf.rs index 4c26aeb0260a..6e54e17e515a 100644 --- a/node/core/pvf/src/execute/worker_intf.rs +++ b/node/core/pvf/src/execute/worker_intf.rs @@ -18,17 +18,20 @@ use crate::{ artifacts::ArtifactPathId, - error::InternalValidationError, - worker_common::{ - framed_recv, framed_send, path_to_bytes, spawn_with_program_path, IdleWorker, SpawnErr, - WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, + worker_intf::{ + path_to_bytes, spawn_with_program_path, IdleWorker, SpawnErr, WorkerHandle, + JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, LOG_TARGET, }; use futures::FutureExt; use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; - +use polkadot_node_core_pvf_common::{ + error::InternalValidationError, + execute::{Handshake, Response}, + framed_recv, framed_send, +}; use polkadot_parachain::primitives::ValidationResult; use polkadot_primitives::ExecutorParams; use std::{path::Path, time::Duration}; @@ -208,42 +211,3 @@ async fn recv_response(stream: &mut UnixStream) -> io::Result { ) }) } - -/// The payload of the one-time handshake that is done when a worker process is created. 
Carries -/// data from the host to the worker. -#[derive(Encode, Decode)] -pub struct Handshake { - /// The executor parameters. - pub executor_params: ExecutorParams, -} - -/// The response from an execution job on the worker. -#[derive(Encode, Decode)] -pub enum Response { - /// The job completed successfully. - Ok { - /// The result of parachain validation. - result_descriptor: ValidationResult, - /// The amount of CPU time taken by the job. - duration: Duration, - }, - /// The candidate is invalid. - InvalidCandidate(String), - /// The job timed out. - TimedOut, - /// An unexpected panic has occurred in the execution worker. - Panic(String), - /// Some internal error occurred. - InternalError(InternalValidationError), -} - -impl Response { - /// Creates an invalid response from a context `ctx` and a message `msg` (which can be empty). - pub fn format_invalid(ctx: &'static str, msg: &str) -> Self { - if msg.is_empty() { - Self::InvalidCandidate(ctx.to_string()) - } else { - Self::InvalidCandidate(format!("{}: {}", ctx, msg)) - } - } -} diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index bfc775a32dee..67f4a66e9748 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -22,16 +22,19 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, - error::PrepareError, execute::{self, PendingExecutionRequest}, metrics::Metrics, - prepare, PrepareResult, Priority, PvfPrepData, ValidationError, LOG_TARGET, + prepare, Priority, ValidationError, LOG_TARGET, }; use always_assert::never; use futures::{ channel::{mpsc, oneshot}, Future, FutureExt, SinkExt, StreamExt, }; +use polkadot_node_core_pvf_common::{ + error::{PrepareError, PrepareResult}, + pvf::PvfPrepData, +}; use polkadot_parachain::primitives::ValidationResult; use std::{ collections::HashMap, @@ -423,7 +426,7 @@ async fn handle_precheck_pvf( pvf: PvfPrepData, result_sender: PrepareResultSender, ) -> Result<(), Fatal> { - let artifact_id = pvf.as_artifact_id(); + let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { @@ -467,7 +470,7 @@ async fn handle_execute_pvf( inputs: ExecutePvfInputs, ) -> Result<(), Fatal> { let ExecutePvfInputs { pvf, exec_timeout, params, priority, result_tx } = inputs; - let artifact_id = pvf.as_artifact_id(); + let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); let executor_params = (*pvf.executor_params()).clone(); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { @@ -590,7 +593,7 @@ async fn handle_heads_up( let now = SystemTime::now(); for active_pvf in active_pvfs { - let artifact_id = active_pvf.as_artifact_id(); + let artifact_id = ArtifactId::from_pvf_prep_data(&active_pvf); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { ArtifactState::Prepared { last_time_needed, .. 
} => { @@ -854,9 +857,10 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{prepare::PrepareStats, InvalidCandidate, PrepareError}; + use crate::InvalidCandidate; use assert_matches::assert_matches; use futures::future::BoxFuture; + use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); @@ -877,7 +881,7 @@ pub(crate) mod tests { /// Creates a new PVF which artifact id can be uniquely identified by the given number. fn artifact_id(descriminator: u32) -> ArtifactId { - PvfPrepData::from_discriminator(descriminator).as_artifact_id() + ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(descriminator)) } fn artifact_path(descriminator: u32) -> PathBuf { diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index 9b302150fd36..d8b801292ca8 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -95,27 +95,31 @@ mod host; mod metrics; mod prepare; mod priority; -mod pvf; -mod worker_common; +mod worker_intf; -pub use artifacts::CompiledArtifact; -pub use error::{ - InternalValidationError, InvalidCandidate, PrepareError, PrepareResult, ValidationError, -}; -pub use execute::{ExecuteHandshake, ExecuteResponse}; -#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] -pub use prepare::MemoryAllocationStats; -pub use prepare::{MemoryStats, PrepareStats}; -pub use priority::Priority; -pub use pvf::PvfPrepData; +#[doc(hidden)] +pub mod testing; + +// Used by `decl_puppet_worker_main!`. +#[doc(hidden)] +pub use sp_tracing; +pub use error::{InvalidCandidate, ValidationError}; pub use host::{start, Config, ValidationHost}; pub use metrics::Metrics; -pub use worker_common::{framed_recv, framed_send, JOB_TIMEOUT_WALL_CLOCK_FACTOR}; +pub use priority::Priority; +pub use worker_intf::{framed_recv, framed_send, JOB_TIMEOUT_WALL_CLOCK_FACTOR}; + +// Re-export some common types. +pub use polkadot_node_core_pvf_common::{ + error::{InternalValidationError, PrepareError}, + prepare::PrepareStats, + pvf::PvfPrepData, +}; -const LOG_TARGET: &str = "parachain::pvf"; +// Re-export worker entrypoints. +pub use polkadot_node_core_pvf_execute_worker::worker_entrypoint as execute_worker_entrypoint; +pub use polkadot_node_core_pvf_prepare_worker::worker_entrypoint as prepare_worker_entrypoint; -#[doc(hidden)] -pub mod testing { - pub use crate::worker_common::{spawn_with_program_path, SpawnErr}; -} +/// The log target for this crate. +pub const LOG_TARGET: &str = "parachain::pvf"; diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs index 12bcd9eadad3..62f8c6dc5157 100644 --- a/node/core/pvf/src/metrics.rs +++ b/node/core/pvf/src/metrics.rs @@ -16,7 +16,7 @@ //! Prometheus metrics related to the validation host. -use crate::prepare::MemoryStats; +use polkadot_node_core_pvf_common::prepare::MemoryStats; use polkadot_node_metrics::metrics::{self, prometheus}; /// Validation host metrics. 
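With `lib.rs` now re-exporting the worker entrypoints and `sp_tracing`, a standalone worker binary could be wired through the relocated `decl_worker_main!` macro from `polkadot-node-core-pvf-common` roughly as sketched below. The `"prepare-worker"` subcommand string and the exact crates named are illustrative, not taken from this change; the macro also assumes `sp_tracing` is a dependency of the invoking crate, since it calls `::sp_tracing::try_init_simple()`.

```rust
// Hypothetical `fn main()` for a prepare-worker binary. The generated main
// expects `<subcommand> --socket-path <path> [--node-version <ver>]` on the
// command line, matching the argument parsing in `common/src/worker.rs`.
polkadot_node_core_pvf_common::decl_worker_main!(
    "prepare-worker",
    polkadot_node_core_pvf::prepare_worker_entrypoint
);
```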
diff --git a/node/core/pvf/src/prepare/mod.rs b/node/core/pvf/src/prepare/mod.rs index de40c48464c4..580f67f73fa0 100644 --- a/node/core/pvf/src/prepare/mod.rs +++ b/node/core/pvf/src/prepare/mod.rs @@ -28,36 +28,3 @@ mod worker_intf; pub use pool::start as start_pool; pub use queue::{start as start_queue, FromQueue, ToQueue}; - -use parity_scale_codec::{Decode, Encode}; - -/// Preparation statistics, including the CPU time and memory taken. -#[derive(Debug, Clone, Default, Encode, Decode)] -pub struct PrepareStats { - /// The CPU time that elapsed for the preparation job. - pub cpu_time_elapsed: std::time::Duration, - /// The observed memory statistics for the preparation job. - pub memory_stats: MemoryStats, -} - -/// Helper struct to contain all the memory stats, including `MemoryAllocationStats` and, if -/// supported by the OS, `ru_maxrss`. -#[derive(Clone, Debug, Default, Encode, Decode)] -pub struct MemoryStats { - /// Memory stats from `tikv_jemalloc_ctl`. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - pub memory_tracker_stats: Option, - /// `ru_maxrss` from `getrusage`. `None` if an error occurred. - #[cfg(target_os = "linux")] - pub max_rss: Option, -} - -/// Statistics of collected memory metrics. -#[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] -#[derive(Clone, Debug, Default, Encode, Decode)] -pub struct MemoryAllocationStats { - /// Total resident memory, in bytes. - pub resident: u64, - /// Total allocated memory, in bytes. - pub allocated: u64, -} diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs index d151f097805e..ae8ecff5285c 100644 --- a/node/core/pvf/src/prepare/pool.rs +++ b/node/core/pvf/src/prepare/pool.rs @@ -16,16 +16,18 @@ use super::worker_intf::{self, Outcome}; use crate::{ - error::{PrepareError, PrepareResult}, metrics::Metrics, - pvf::PvfPrepData, - worker_common::{IdleWorker, WorkerHandle}, + worker_intf::{IdleWorker, WorkerHandle}, LOG_TARGET, }; use always_assert::never; use futures::{ channel::mpsc, future::BoxFuture, stream::FuturesUnordered, Future, FutureExt, StreamExt, }; +use polkadot_node_core_pvf_common::{ + error::{PrepareError, PrepareResult}, + pvf::PvfPrepData, +}; use slotmap::HopSlotMap; use std::{ fmt, diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index f84d5ab0e56e..5e19a4c7217a 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -17,11 +17,10 @@ //! A queue that handles requests for PVF preparation. use super::pool::{self, Worker}; -use crate::{ - artifacts::ArtifactId, metrics::Metrics, PrepareResult, Priority, PvfPrepData, LOG_TARGET, -}; +use crate::{artifacts::ArtifactId, metrics::Metrics, Priority, LOG_TARGET}; use always_assert::{always, never}; use futures::{channel::mpsc, stream::StreamExt as _, Future, SinkExt}; +use polkadot_node_core_pvf_common::{error::PrepareResult, pvf::PvfPrepData}; use std::{ collections::{HashMap, VecDeque}, path::PathBuf, @@ -231,7 +230,7 @@ async fn handle_enqueue( ); queue.metrics.prepare_enqueued(); - let artifact_id = pvf.as_artifact_id(); + let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); if never!( queue.artifact_id_to_job.contains_key(&artifact_id), "second Enqueue sent for a known artifact" @@ -339,7 +338,7 @@ async fn handle_worker_concluded( // this can't be None; // qed. 
let job_data = never_none!(queue.jobs.remove(job)); - let artifact_id = job_data.pvf.as_artifact_id(); + let artifact_id = ArtifactId::from_pvf_prep_data(&job_data.pvf); queue.artifact_id_to_job.remove(&artifact_id); @@ -425,7 +424,7 @@ async fn spawn_extra_worker(queue: &mut Queue, critical: bool) -> Result<(), Fat async fn assign(queue: &mut Queue, worker: Worker, job: Job) -> Result<(), Fatal> { let job_data = &mut queue.jobs[job]; - let artifact_id = job_data.pvf.as_artifact_id(); + let artifact_id = ArtifactId::from_pvf_prep_data(&job_data.pvf); let artifact_path = artifact_id.path(&queue.cache_path); job_data.worker = Some(worker); @@ -488,11 +487,10 @@ pub fn start( #[cfg(test)] mod tests { use super::*; - use crate::{ - error::PrepareError, host::tests::TEST_PREPARATION_TIMEOUT, prepare::PrepareStats, - }; + use crate::host::tests::TEST_PREPARATION_TIMEOUT; use assert_matches::assert_matches; use futures::{future::BoxFuture, FutureExt}; + use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; use slotmap::SlotMap; use std::task::Poll; @@ -616,7 +614,10 @@ mod tests { result: Ok(PrepareStats::default()), }); - assert_eq!(test.poll_and_recv_from_queue().await.artifact_id, pvf(1).as_artifact_id()); + assert_eq!( + test.poll_and_recv_from_queue().await.artifact_id, + ArtifactId::from_pvf_prep_data(&pvf(1)) + ); } #[tokio::test] @@ -735,7 +736,10 @@ mod tests { // Since there is still work, the queue requested one extra worker to spawn to handle the // remaining enqueued work items. assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Spawn); - assert_eq!(test.poll_and_recv_from_queue().await.artifact_id, pvf(1).as_artifact_id()); + assert_eq!( + test.poll_and_recv_from_queue().await.artifact_id, + ArtifactId::from_pvf_prep_data(&pvf(1)) + ); } #[tokio::test] diff --git a/node/core/pvf/src/prepare/worker_intf.rs b/node/core/pvf/src/prepare/worker_intf.rs index daf94aadc672..47522d3f0856 100644 --- a/node/core/pvf/src/prepare/worker_intf.rs +++ b/node/core/pvf/src/prepare/worker_intf.rs @@ -17,17 +17,20 @@ //! Host interface to the prepare worker. use crate::{ - error::{PrepareError, PrepareResult}, metrics::Metrics, - prepare::PrepareStats, - pvf::PvfPrepData, - worker_common::{ - framed_recv, framed_send, path_to_bytes, spawn_with_program_path, tmpfile_in, IdleWorker, - SpawnErr, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, + worker_intf::{ + path_to_bytes, spawn_with_program_path, tmpfile_in, IdleWorker, SpawnErr, WorkerHandle, + JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, LOG_TARGET, }; use parity_scale_codec::{Decode, Encode}; +use polkadot_node_core_pvf_common::{ + error::{PrepareError, PrepareResult}, + framed_recv, framed_send, + prepare::PrepareStats, + pvf::PvfPrepData, +}; use sp_core::hexdisplay::HexDisplay; use std::{ diff --git a/node/core/pvf/worker/src/testing.rs b/node/core/pvf/src/testing.rs similarity index 93% rename from node/core/pvf/worker/src/testing.rs rename to node/core/pvf/src/testing.rs index 7497d4aed31c..cc07d7aeef02 100644 --- a/node/core/pvf/worker/src/testing.rs +++ b/node/core/pvf/src/testing.rs @@ -19,6 +19,9 @@ //! N.B. This is not guarded with some feature flag. Overexposing items here may affect the final //! artifact even for production builds. 
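For orientation, a minimal sketch of the relocated preparation entry points that the updated `testing::validate_candidate` (below) now pulls from the new `prepare-worker` crate. It assumes `code` is an already-decompressed PVF blob (the real caller decompresses with `sp_maybe_compressed_blob` first) and uses default executor parameters; it is not part of the change.

```rust
// Minimal sketch of the relocated preparation path.
use polkadot_node_core_pvf_prepare_worker::{prepare, prevalidate};
use polkadot_primitives::ExecutorParams;

fn compile_artifact(code: &[u8]) -> Result<Vec<u8>, sc_executor_common::error::WasmError> {
    // Prevalidation yields a `RuntimeBlob` if the code passes basic checks.
    let blob = prevalidate(code)?;
    // Returns the serialized compiled artifact, ready for the host to write to disk.
    prepare(blob, &ExecutorParams::default())
}
```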
+#[doc(hidden)] +pub use crate::worker_intf::{spawn_with_program_path, SpawnErr}; + use polkadot_primitives::ExecutorParams; /// A function that emulates the stitches together behaviors of the preparation and the execution @@ -27,7 +30,8 @@ pub fn validate_candidate( code: &[u8], params: &[u8], ) -> Result, Box> { - use crate::executor_intf::{prepare, prevalidate, Executor}; + use polkadot_node_core_pvf_execute_worker::Executor; + use polkadot_node_core_pvf_prepare_worker::{prepare, prevalidate}; let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024) .expect("Decompressing code failed"); diff --git a/node/core/pvf/src/worker_common.rs b/node/core/pvf/src/worker_intf.rs similarity index 100% rename from node/core/pvf/src/worker_common.rs rename to node/core/pvf/src/worker_intf.rs diff --git a/node/core/pvf/worker/tests/it/adder.rs b/node/core/pvf/tests/it/adder.rs similarity index 100% rename from node/core/pvf/worker/tests/it/adder.rs rename to node/core/pvf/tests/it/adder.rs diff --git a/node/core/pvf/worker/tests/it/main.rs b/node/core/pvf/tests/it/main.rs similarity index 100% rename from node/core/pvf/worker/tests/it/main.rs rename to node/core/pvf/tests/it/main.rs diff --git a/node/core/pvf/worker/tests/it/worker_common.rs b/node/core/pvf/tests/it/worker_common.rs similarity index 100% rename from node/core/pvf/worker/tests/it/worker_common.rs rename to node/core/pvf/tests/it/worker_common.rs diff --git a/node/core/pvf/worker/src/lib.rs b/node/core/pvf/worker/src/lib.rs deleted file mode 100644 index 456362cf8f57..000000000000 --- a/node/core/pvf/worker/src/lib.rs +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -mod common; -mod execute; -mod executor_intf; -mod memory_stats; -mod prepare; - -#[doc(hidden)] -pub mod testing; - -#[doc(hidden)] -pub use sp_tracing; - -pub use execute::worker_entrypoint as execute_worker_entrypoint; -pub use prepare::worker_entrypoint as prepare_worker_entrypoint; - -pub use executor_intf::{prepare, prevalidate}; - -// NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are -// separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-worker=trace`. -const LOG_TARGET: &str = "parachain::pvf-worker"; - -/// Use this macro to declare a `fn main() {}` that will create an executable that can be used for -/// spawning the desired worker. -#[macro_export(local_inner_macros)] -macro_rules! 
decl_worker_main { - ($command:tt) => { - fn main() { - $crate::sp_tracing::try_init_simple(); - - let args = std::env::args().collect::>(); - - let mut version = None; - let mut socket_path: &str = ""; - - for i in 1..args.len() { - match args[i].as_ref() { - "--socket-path" => socket_path = args[i + 1].as_str(), - "--node-version" => version = Some(args[i + 1].as_str()), - _ => (), - } - } - - decl_worker_main_command!($command, socket_path, version) - } - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! decl_worker_main_command { - (prepare, $socket_path:expr, $version: expr) => { - $crate::prepare_worker_entrypoint(&$socket_path, $version) - }; - (execute, $socket_path:expr, $version: expr) => { - $crate::execute_worker_entrypoint(&$socket_path, $version) - }; -} diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 00b2068ad330..4a97636786fb 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -64,7 +64,10 @@ pub(crate) struct RequestResultCache { LruCache<(Hash, ParaId, OccupiedCoreAssumption), Option>, version: LruCache, disputes: LruCache)>>, - + unapplied_slashes: + LruCache>, + key_ownership_proof: + LruCache<(Hash, ValidatorId), Option>, staging_para_backing_state: LruCache<(Hash, ParaId), Option>, staging_async_backing_params: LruCache, } @@ -94,7 +97,8 @@ impl Default for RequestResultCache { validation_code_hash: LruCache::new(DEFAULT_CACHE_CAP), version: LruCache::new(DEFAULT_CACHE_CAP), disputes: LruCache::new(DEFAULT_CACHE_CAP), - + unapplied_slashes: LruCache::new(DEFAULT_CACHE_CAP), + key_ownership_proof: LruCache::new(DEFAULT_CACHE_CAP), staging_para_backing_state: LruCache::new(DEFAULT_CACHE_CAP), staging_async_backing_params: LruCache::new(DEFAULT_CACHE_CAP), } @@ -393,6 +397,44 @@ impl RequestResultCache { self.disputes.put(relay_parent, value); } + pub(crate) fn unapplied_slashes( + &mut self, + relay_parent: &Hash, + ) -> Option<&Vec<(SessionIndex, CandidateHash, vstaging_primitives::slashing::PendingSlashes)>> { + self.unapplied_slashes.get(relay_parent) + } + + pub(crate) fn cache_unapplied_slashes( + &mut self, + relay_parent: Hash, + value: Vec<(SessionIndex, CandidateHash, vstaging_primitives::slashing::PendingSlashes)>, + ) { + self.unapplied_slashes.put(relay_parent, value); + } + + pub(crate) fn key_ownership_proof( + &mut self, + key: (Hash, ValidatorId), + ) -> Option<&Option> { + self.key_ownership_proof.get(&key) + } + + pub(crate) fn cache_key_ownership_proof( + &mut self, + key: (Hash, ValidatorId), + value: Option, + ) { + self.key_ownership_proof.put(key, value); + } + + // This request is never cached, hence always returns `None`. + pub(crate) fn submit_report_dispute_lost( + &mut self, + _key: (Hash, vstaging_primitives::slashing::DisputeProof, vstaging_primitives::slashing::OpaqueKeyOwnershipProof), + ) -> Option<&Option<()>> { + None + } + pub(crate) fn staging_para_backing_state( &mut self, key: (Hash, ParaId), @@ -459,6 +501,15 @@ pub(crate) enum RequestResult { ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), + UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, vstaging_primitives::slashing::PendingSlashes)>), + KeyOwnershipProof(Hash, ValidatorId, Option), + // This is a request with side-effects. 
+ SubmitReportDisputeLost( + Hash, + vstaging_primitives::slashing::DisputeProof, + vstaging_primitives::slashing::OpaqueKeyOwnershipProof, + Option<()>, + ), StagingParaBackingState(Hash, ParaId, Option), StagingAsyncBackingParams(Hash, vstaging_primitives::AsyncBackingParams), diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index d835ceb53fd3..512076bd223f 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -157,7 +157,12 @@ where self.requests_cache.cache_version(relay_parent, version), Disputes(relay_parent, disputes) => self.requests_cache.cache_disputes(relay_parent, disputes), - + UnappliedSlashes(relay_parent, unapplied_slashes) => + self.requests_cache.cache_unapplied_slashes(relay_parent, unapplied_slashes), + KeyOwnershipProof(relay_parent, validator_id, key_ownership_proof) => self + .requests_cache + .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), + SubmitReportDisputeLost(_, _, _, _) => {}, StagingParaBackingState(relay_parent, para_id, constraints) => self .requests_cache .cache_staging_para_backing_state((relay_parent, para_id), constraints), @@ -277,6 +282,17 @@ where .map(|sender| Request::ValidationCodeHash(para, assumption, sender)), Request::Disputes(sender) => query!(disputes(), sender).map(|sender| Request::Disputes(sender)), + Request::UnappliedSlashes(sender) => + query!(unapplied_slashes(), sender).map(|sender| Request::UnappliedSlashes(sender)), + Request::KeyOwnershipProof(validator_id, sender) => + query!(key_ownership_proof(validator_id), sender) + .map(|sender| Request::KeyOwnershipProof(validator_id, sender)), + Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) => + query!(submit_report_dispute_lost(dispute_proof, key_ownership_proof), sender).map( + |sender| { + Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) + }, + ), Request::StagingParaBackingState(para, sender) => query!(staging_para_backing_state(para), sender) .map(|sender| Request::StagingParaBackingState(para, sender)), @@ -431,33 +447,38 @@ where Request::Authorities(sender) => query!(Authorities, authorities(), ver = 1, sender), Request::Validators(sender) => query!(Validators, validators(), ver = 1, sender), - Request::ValidatorGroups(sender) => - query!(ValidatorGroups, validator_groups(), ver = 1, sender), - Request::AvailabilityCores(sender) => - query!(AvailabilityCores, availability_cores(), ver = 1, sender), + Request::ValidatorGroups(sender) => { + query!(ValidatorGroups, validator_groups(), ver = 1, sender) + }, + Request::AvailabilityCores(sender) => { + query!(AvailabilityCores, availability_cores(), ver = 1, sender) + }, Request::PersistedValidationData(para, assumption, sender) => query!( PersistedValidationData, persisted_validation_data(para, assumption), ver = 1, sender ), - Request::AssumedValidationData(para, expected_persisted_validation_data_hash, sender) => + Request::AssumedValidationData(para, expected_persisted_validation_data_hash, sender) => { query!( AssumedValidationData, assumed_validation_data(para, expected_persisted_validation_data_hash), ver = 1, sender - ), + ) + }, Request::CheckValidationOutputs(para, commitments, sender) => query!( CheckValidationOutputs, check_validation_outputs(para, commitments), ver = 1, sender ), - Request::SessionIndexForChild(sender) => - query!(SessionIndexForChild, session_index_for_child(), ver = 1, sender), - Request::ValidationCode(para, assumption, sender) => - 
query!(ValidationCode, validation_code(para, assumption), ver = 1, sender), + Request::SessionIndexForChild(sender) => { + query!(SessionIndexForChild, session_index_for_child(), ver = 1, sender) + }, + Request::ValidationCode(para, assumption, sender) => { + query!(ValidationCode, validation_code(para, assumption), ver = 1, sender) + }, Request::ValidationCodeByHash(validation_code_hash, sender) => query!( ValidationCodeByHash, validation_code_by_hash(validation_code_hash), @@ -470,10 +491,12 @@ where ver = 1, sender ), - Request::CandidateEvents(sender) => - query!(CandidateEvents, candidate_events(), ver = 1, sender), - Request::SessionInfo(index, sender) => - query!(SessionInfo, session_info(index), ver = 2, sender), + Request::CandidateEvents(sender) => { + query!(CandidateEvents, candidate_events(), ver = 1, sender) + }, + Request::SessionInfo(index, sender) => { + query!(SessionInfo, session_info(index), ver = 2, sender) + }, Request::SessionExecutorParams(session_index, sender) => query!( SessionExecutorParams, session_executor_params(session_index), @@ -481,12 +504,15 @@ where sender ), Request::DmqContents(id, sender) => query!(DmqContents, dmq_contents(id), ver = 1, sender), - Request::InboundHrmpChannelsContents(id, sender) => - query!(InboundHrmpChannelsContents, inbound_hrmp_channels_contents(id), ver = 1, sender), - Request::CurrentBabeEpoch(sender) => - query!(CurrentBabeEpoch, current_epoch(), ver = 1, sender), - Request::FetchOnChainVotes(sender) => - query!(FetchOnChainVotes, on_chain_votes(), ver = 1, sender), + Request::InboundHrmpChannelsContents(id, sender) => { + query!(InboundHrmpChannelsContents, inbound_hrmp_channels_contents(id), ver = 1, sender) + }, + Request::CurrentBabeEpoch(sender) => { + query!(CurrentBabeEpoch, current_epoch(), ver = 1, sender) + }, + Request::FetchOnChainVotes(sender) => { + query!(FetchOnChainVotes, on_chain_votes(), ver = 1, sender) + }, Request::SubmitPvfCheckStatement(stmt, signature, sender) => { query!( SubmitPvfCheckStatement, @@ -498,10 +524,30 @@ where Request::PvfsRequirePrecheck(sender) => { query!(PvfsRequirePrecheck, pvfs_require_precheck(), ver = 2, sender) }, - Request::ValidationCodeHash(para, assumption, sender) => - query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender), - Request::Disputes(sender) => - query!(Disputes, disputes(), ver = Request::DISPUTES_RUNTIME_REQUIREMENT, sender), + Request::ValidationCodeHash(para, assumption, sender) => { + query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender) + }, + Request::Disputes(sender) => { + query!(Disputes, disputes(), ver = Request::DISPUTES_RUNTIME_REQUIREMENT, sender) + }, + Request::UnappliedSlashes(sender) => query!( + UnappliedSlashes, + unapplied_slashes(), + ver = Request::UNAPPLIED_SLASHES_RUNTIME_REQUIREMENT, + sender + ), + Request::KeyOwnershipProof(validator_id, sender) => query!( + KeyOwnershipProof, + key_ownership_proof(validator_id), + ver = Request::KEY_OWNERSHIP_PROOF_RUNTIME_REQUIREMENT, + sender + ), + Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) => query!( + SubmitReportDisputeLost, + submit_report_dispute_lost(dispute_proof, key_ownership_proof), + ver = Request::SUBMIT_REPORT_DISPUTE_LOST_RUNTIME_REQUIREMENT, + sender + ), Request::StagingParaBackingState(para, sender) => { query!( StagingParaBackingState, diff --git a/node/gum/README.md b/node/gum/README.md index 9d2cc3168cdd..739ce3066ecb 100644 --- a/node/gum/README.md +++ b/node/gum/README.md @@ -3,6 
+3,10 @@ "gum" to make `tracing::{warn,info,..}` and `mick-jaeger` stick together, to be cross referenced in grafana with zero additional loc in the source code. +## Usage + +See the crate docs (e.g. run `cargo doc --open`) for usage information! + ## Architecture Decision Record (ADR) ### Context diff --git a/node/gum/proc-macro/Cargo.toml b/node/gum/proc-macro/Cargo.toml index 3fac1a7b928f..9e60535704d3 100644 --- a/node/gum/proc-macro/Cargo.toml +++ b/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ proc-macro = true [dependencies] syn = { version = "2.0.15", features = ["full", "extra-traits"] } -quote = "1.0.26" +quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" expander = "2.0.0" diff --git a/node/gum/src/lib.rs b/node/gum/src/lib.rs index 8e65343d21e2..c2d62d98a671 100644 --- a/node/gum/src/lib.rs +++ b/node/gum/src/lib.rs @@ -20,6 +20,89 @@ //! A wrapper around `tracing` macros, to provide semi automatic //! `traceID` annotation without codebase turnover. +//! +//! # Usage +//! +//! The API follows the [`tracing` +//! API](https://docs.rs/tracing/latest/tracing/index.html), but the docs contain +//! more detail than you probably need to know, so here's the quick version. +//! +//! Most common usage is of the form: +//! +//! ```rs +//! gum::warn!( +//! target: LOG_TARGET, +//! worker_pid = %idle_worker.pid, +//! ?error, +//! "failed to send a handshake to the spawned worker", +//! ); +//! ``` +//! +//! ### Log levels +//! +//! All of the the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available. +//! In decreasing order of priority they are: +//! +//! - `error!` +//! - `warn!` +//! - `info!` +//! - `debug!` +//! - `trace!` +//! +//! ### `target` +//! +//! The `LOG_TARGET` should be defined once per crate, e.g.: +//! +//! ```rs +//! const LOG_TARGET: &str = "parachain::pvf"; +//! ``` +//! +//! This should be of the form `::`, where the `::` is optional. +//! +//! The target and subtarget are used when debugging by specializing the Grafana Loki query to +//! filter specific subsystem logs. The more specific the query is the better when approaching the +//! query response limit. +//! +//! ### Fields +//! +//! Here's the rundown on how fields work: +//! +//! - Fields on spans and events are specified using the `syntax field_name = +//! field_value`. +//! - Local variables may be used as field values without an assignment, similar to +//! struct initializers. +//! - The `?` sigil is shorthand that specifies a field should be recorded using its +//! `fmt::Debug` implementation. +//! - The `%` sigil operates similarly, but indicates that the value should be +//! recorded using its `fmt::Display` implementation. +//! +//! For full details, again see [the tracing +//! docs](https://docs.rs/tracing/latest/tracing/index.html#recording-fields). +//! +//! ### Viewing traces +//! +//! When testing, +//! +//! ```rs +//! sp_tracing::init_for_tests(); +//! ``` +//! +//! should enable all trace logs. +//! +//! Alternatively, you can do: +//! +//! ```rs +//! sp_tracing::try_init_simple(); +//! ``` +//! +//! On the command line you specify `RUST_LOG` with the desired target and trace level: +//! +//! ```sh +//! RUST_LOG=parachain::pvf=trace cargo test +//! ``` +//! +//! On the other hand if you want all `parachain` logs, specify `parachain=trace`, which will also +//! include logs from `parachain::pvf` and other subtargets. 
pub use tracing::{enabled, event, Level}; diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index a36822b041a3..8e23e623174f 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -20,7 +20,8 @@ polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator" } polkadot-node-core-candidate-validation = { path = "../core/candidate-validation" } polkadot-node-core-backing = { path = "../core/backing" } -polkadot-node-core-pvf-worker = { path = "../core/pvf/worker" } +polkadot-node-core-pvf-execute-worker = { path = "../core/pvf/execute-worker" } +polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } polkadot-node-primitives = { path = "../primitives" } polkadot-primitives = { path = "../../primitives" } color-eyre = { version = "0.6.1", default-features = false } diff --git a/node/malus/src/malus.rs b/node/malus/src/malus.rs index 36cf0cca06bf..d09f8be990a4 100644 --- a/node/malus/src/malus.rs +++ b/node/malus/src/malus.rs @@ -97,7 +97,7 @@ impl MalusCli { #[cfg(not(target_os = "android"))] { - polkadot_node_core_pvf_worker::prepare_worker_entrypoint( + polkadot_node_core_pvf_prepare_worker::worker_entrypoint( &cmd.socket_path, None, ); @@ -111,7 +111,7 @@ impl MalusCli { #[cfg(not(target_os = "android"))] { - polkadot_node_core_pvf_worker::execute_worker_entrypoint( + polkadot_node_core_pvf_execute_worker::worker_entrypoint( &cmd.socket_path, None, ); diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index ce77a9fc2ecd..f965018c07d9 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -19,7 +19,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master polkadot-parachain = { path = "../../parachain", default-features = false } schnorrkel = "0.9.1" thiserror = "1.0.31" -serde = { version = "1.0.137", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.11.2", default-features = false } diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 972192ff8107..4329405e76f1 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -70,7 +70,7 @@ frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", bran futures = "0.3.21" hex-literal = "0.4.1" gum = { package = "tracing-gum", path = "../gum/" } -serde = { version = "1.0.137", features = ["derive"] } +serde = { version = "1.0.163", features = ["derive"] } serde_json = "1.0.96" thiserror = "1.0.31" kvdb = "0.13.0" diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index c19fd8b2576b..a467da98946a 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -903,7 +903,6 @@ where let approval_voting_config = ApprovalVotingConfig { col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, - col_session_data: parachains_db::REAL_COLUMNS.col_session_window_data, slot_duration_millis: slot_duration.as_millis() as u64, }; @@ -927,7 +926,6 @@ where let dispute_coordinator_config = DisputeCoordinatorConfig { col_dispute_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data, - col_session_data: parachains_db::REAL_COLUMNS.col_session_window_data, }; let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams { @@ -1167,15 +1165,11 @@ where let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); - // Wococo's purpose is to be a testbed for BEEFY, so if it 
fails we'll + // BEEFY currently only runs on testnets, if it fails we'll // bring the node down with it to make sure it is noticed. - if chain_spec.is_wococo() { - task_manager - .spawn_essential_handle() - .spawn_blocking("beefy-gadget", None, gadget); - } else { - task_manager.spawn_handle().spawn_blocking("beefy-gadget", None, gadget); - } + task_manager + .spawn_essential_handle() + .spawn_blocking("beefy-gadget", None, gadget); if is_offchain_indexing_enabled { task_manager.spawn_handle().spawn_blocking( @@ -1525,7 +1519,6 @@ fn revert_chain_selection(db: Arc, hash: Hash) -> sp_blockchain::R fn revert_approval_voting(db: Arc, hash: Hash) -> sp_blockchain::Result<()> { let config = approval_voting_subsystem::Config { col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, - col_session_data: parachains_db::REAL_COLUMNS.col_session_window_data, slot_duration_millis: Default::default(), }; diff --git a/node/service/src/parachains_db/mod.rs b/node/service/src/parachains_db/mod.rs index 918aecd25e76..519afbe0ccd1 100644 --- a/node/service/src/parachains_db/mod.rs +++ b/node/service/src/parachains_db/mod.rs @@ -36,12 +36,18 @@ pub(crate) mod columns { pub mod v2 { pub const NUM_COLUMNS: u32 = 6; + + #[cfg(test)] + pub const COL_SESSION_WINDOW_DATA: u32 = 5; + } + + pub mod v3 { + pub const NUM_COLUMNS: u32 = 5; pub const COL_AVAILABILITY_DATA: u32 = 0; pub const COL_AVAILABILITY_META: u32 = 1; pub const COL_APPROVAL_DATA: u32 = 2; pub const COL_CHAIN_SELECTION_DATA: u32 = 3; pub const COL_DISPUTE_COORDINATOR_DATA: u32 = 4; - pub const COL_SESSION_WINDOW_DATA: u32 = 5; pub const ORDERED_COL: &[u32] = &[COL_AVAILABILITY_META, COL_CHAIN_SELECTION_DATA, COL_DISPUTE_COORDINATOR_DATA]; @@ -62,19 +68,16 @@ pub struct ColumnsConfig { pub col_chain_selection_data: u32, /// The column used by dispute coordinator for data. pub col_dispute_coordinator_data: u32, - /// The column used for session window data. - pub col_session_window_data: u32, } /// The real columns used by the parachains DB. 
#[cfg(any(test, feature = "full-node"))] pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig { - col_availability_data: columns::v2::COL_AVAILABILITY_DATA, - col_availability_meta: columns::v2::COL_AVAILABILITY_META, - col_approval_data: columns::v2::COL_APPROVAL_DATA, - col_chain_selection_data: columns::v2::COL_CHAIN_SELECTION_DATA, - col_dispute_coordinator_data: columns::v2::COL_DISPUTE_COORDINATOR_DATA, - col_session_window_data: columns::v2::COL_SESSION_WINDOW_DATA, + col_availability_data: columns::v3::COL_AVAILABILITY_DATA, + col_availability_meta: columns::v3::COL_AVAILABILITY_META, + col_approval_data: columns::v3::COL_APPROVAL_DATA, + col_chain_selection_data: columns::v3::COL_CHAIN_SELECTION_DATA, + col_dispute_coordinator_data: columns::v3::COL_DISPUTE_COORDINATOR_DATA, }; #[derive(PartialEq)] @@ -122,20 +125,17 @@ pub fn open_creating_rocksdb( let path = root.join("parachains").join("db"); - let mut db_config = DatabaseConfig::with_columns(columns::v2::NUM_COLUMNS); + let mut db_config = DatabaseConfig::with_columns(columns::v3::NUM_COLUMNS); let _ = db_config .memory_budget - .insert(columns::v2::COL_AVAILABILITY_DATA, cache_sizes.availability_data); - let _ = db_config - .memory_budget - .insert(columns::v2::COL_AVAILABILITY_META, cache_sizes.availability_meta); + .insert(columns::v3::COL_AVAILABILITY_DATA, cache_sizes.availability_data); let _ = db_config .memory_budget - .insert(columns::v2::COL_APPROVAL_DATA, cache_sizes.approval_data); + .insert(columns::v3::COL_AVAILABILITY_META, cache_sizes.availability_meta); let _ = db_config .memory_budget - .insert(columns::v2::COL_SESSION_WINDOW_DATA, cache_sizes.session_data); + .insert(columns::v3::COL_APPROVAL_DATA, cache_sizes.approval_data); let path_str = path .to_str() @@ -146,7 +146,7 @@ pub fn open_creating_rocksdb( let db = Database::open(&db_config, &path_str)?; let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new( db, - columns::v2::ORDERED_COL, + columns::v3::ORDERED_COL, ); Ok(Arc::new(db)) @@ -166,12 +166,12 @@ pub fn open_creating_paritydb( std::fs::create_dir_all(&path_str)?; upgrade::try_upgrade_db(&path, DatabaseKind::ParityDB)?; - let db = parity_db::Db::open_or_create(&upgrade::paritydb_version_2_config(&path)) + let db = parity_db::Db::open_or_create(&upgrade::paritydb_version_3_config(&path)) .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?; let db = polkadot_node_subsystem_util::database::paritydb_impl::DbAdapter::new( db, - columns::v2::ORDERED_COL, + columns::v3::ORDERED_COL, ); Ok(Arc::new(db)) } diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index c52bd21c0573..6041a093ef9b 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -28,7 +28,7 @@ type Version = u32; const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. -const CURRENT_VERSION: Version = 2; +const CURRENT_VERSION: Version = 3; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -58,6 +58,8 @@ pub(crate) fn try_upgrade_db(db_path: &Path, db_kind: DatabaseKind) -> Result<() Some(0) => migrate_from_version_0_to_1(db_path, db_kind)?, // 1 -> 2 migration Some(1) => migrate_from_version_1_to_2(db_path, db_kind)?, + // 2 -> 3 migration + Some(2) => migrate_from_version_2_to_3(db_path, db_kind)?, // Already at current version, do nothing. Some(CURRENT_VERSION) => (), // This is an arbitrary future version, we don't handle it. 
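One point worth spelling out about the new `Some(2)` arm: the 2 -> 3 step can be implemented as a plain "drop the last column" (which is exactly what both `migrate_from_version_2_to_3` code paths in this diff do) because the session-window column was the highest-numbered one, so every surviving column keeps its index and no data needs to be moved. Sketched with the constants from `columns::v2`/`columns::v3` above (comments are editorial):

```rust
// Column layout shared by v2 and v3; the indices are untouched by the migration.
pub const COL_AVAILABILITY_DATA: u32 = 0;
pub const COL_AVAILABILITY_META: u32 = 1;
pub const COL_APPROVAL_DATA: u32 = 2;
pub const COL_CHAIN_SELECTION_DATA: u32 = 3;
pub const COL_DISPUTE_COORDINATOR_DATA: u32 = 4;

// v2 only: the column removed by `remove_last_column` (RocksDB) /
// `drop_last_column` (ParityDB) during the 2 -> 3 migration.
// pub const COL_SESSION_WINDOW_DATA: u32 = 5;
```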
@@ -127,6 +129,18 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<(), }) } +fn migrate_from_version_2_to_3(path: &Path, db_kind: DatabaseKind) -> Result<(), Error> { + gum::info!(target: LOG_TARGET, "Migrating parachains db from version 2 to version 3 ..."); + match db_kind { + DatabaseKind::ParityDB => paritydb_migrate_from_version_2_to_3(path), + DatabaseKind::RocksDB => rocksdb_migrate_from_version_2_to_3(path), + } + .and_then(|result| { + gum::info!(target: LOG_TARGET, "Migration complete! "); + Ok(result) + }) +} + /// Migration from version 0 to version 1: /// * the number of columns has changed from 3 to 5; fn rocksdb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { @@ -160,6 +174,20 @@ fn rocksdb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { Ok(()) } +fn rocksdb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let db_path = path + .to_str() + .ok_or_else(|| super::other_io_error("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + let mut db = Database::open(&db_cfg, db_path)?; + + db.remove_last_column()?; + + Ok(()) +} + // This currently clears columns which had their configs altered between versions. // The columns to be changed are constrained by the `allowed_columns` vector. fn paritydb_fix_columns( @@ -221,7 +249,7 @@ fn paritydb_fix_columns( pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - for i in columns::v2::ORDERED_COL { + for i in columns::v3::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -232,7 +260,18 @@ pub(crate) fn paritydb_version_1_config(path: &Path) -> parity_db::Options { pub(crate) fn paritydb_version_2_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v2::NUM_COLUMNS as u8); - for i in columns::v2::ORDERED_COL { + for i in columns::v3::ORDERED_COL { + options.columns[*i as usize].btree_index = true; + } + + options +} + +/// Database configuration for version 3. 
+pub(crate) fn paritydb_version_3_config(path: &Path) -> parity_db::Options { + let mut options = + parity_db::Options::with_columns(&path, super::columns::v3::NUM_COLUMNS as u8); + for i in columns::v3::ORDERED_COL { options.columns[*i as usize].btree_index = true; } @@ -244,8 +283,8 @@ pub(crate) fn paritydb_version_2_config(path: &Path) -> parity_db::Options { pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { let mut options = parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - options.columns[super::columns::v2::COL_AVAILABILITY_META as usize].btree_index = true; - options.columns[super::columns::v2::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; + options.columns[super::columns::v3::COL_AVAILABILITY_META as usize].btree_index = true; + options.columns[super::columns::v3::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; options } @@ -260,7 +299,7 @@ fn paritydb_migrate_from_version_0_to_1(path: &Path) -> Result<(), Error> { paritydb_fix_columns( path, paritydb_version_1_config(path), - vec![super::columns::v2::COL_DISPUTE_COORDINATOR_DATA], + vec![super::columns::v3::COL_DISPUTE_COORDINATOR_DATA], )?; Ok(()) @@ -278,9 +317,20 @@ fn paritydb_migrate_from_version_1_to_2(path: &Path) -> Result<(), Error> { Ok(()) } +/// Migration from version 2 to version 3: +/// - drop the column used by `RollingSessionWindow` +fn paritydb_migrate_from_version_2_to_3(path: &Path) -> Result<(), Error> { + parity_db::Db::drop_last_column(&mut paritydb_version_2_config(path)) + .map_err(|e| other_io_error(format!("Error removing COL_SESSION_WINDOW_DATA {:?}", e)))?; + Ok(()) +} + #[cfg(test)] mod tests { - use super::{columns::v2::*, *}; + use super::{ + columns::{v2::COL_SESSION_WINDOW_DATA, v3::*}, + *, + }; #[test] fn test_paritydb_migrate_0_to_1() { @@ -375,7 +425,7 @@ mod tests { // We need to properly set db version for upgrade to work. fs::write(version_file_path(db_dir.path()), "1").expect("Failed to write DB version"); { - let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + let db = DbAdapter::new(db, columns::v3::ORDERED_COL); db.write(DBTransaction { ops: vec![DBOp::Insert { col: COL_DISPUTE_COORDINATOR_DATA, @@ -393,7 +443,7 @@ mod tests { assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS); - let db = DbAdapter::new(db, columns::v2::ORDERED_COL); + let db = DbAdapter::new(db, columns::v3::ORDERED_COL); assert_eq!( db.get(COL_DISPUTE_COORDINATOR_DATA, b"1234").unwrap(), @@ -416,4 +466,59 @@ mod tests { Some("0xdeadb00b".as_bytes().to_vec()) ); } + + #[test] + fn test_paritydb_migrate_2_to_3() { + use parity_db::Db; + + let db_dir = tempfile::tempdir().unwrap(); + let path = db_dir.path(); + let test_key = b"1337"; + + // We need to properly set db version for upgrade to work. 
+ fs::write(version_file_path(path), "2").expect("Failed to write DB version"); + + { + let db = Db::open_or_create(&paritydb_version_2_config(&path)).unwrap(); + + // Write some dummy data + db.commit(vec![( + COL_SESSION_WINDOW_DATA as u8, + test_key.to_vec(), + Some(b"0xdeadb00b".to_vec()), + )]) + .unwrap(); + + assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); + } + + try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); + + let db = Db::open(&paritydb_version_3_config(&path)).unwrap(); + + assert_eq!(db.num_columns(), columns::v3::NUM_COLUMNS as u8); + } + + #[test] + fn test_rocksdb_migrate_2_to_3() { + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let db_dir = tempfile::tempdir().unwrap(); + let db_path = db_dir.path().to_str().unwrap(); + let db_cfg = DatabaseConfig::with_columns(super::columns::v2::NUM_COLUMNS); + { + let db = Database::open(&db_cfg, db_path).unwrap(); + assert_eq!(db.num_columns(), super::columns::v2::NUM_COLUMNS as u32); + } + + // We need to properly set db version for upgrade to work. + fs::write(version_file_path(db_dir.path()), "2").expect("Failed to write DB version"); + + try_upgrade_db(&db_dir.path(), DatabaseKind::RocksDB).unwrap(); + + let db_cfg = DatabaseConfig::with_columns(super::columns::v3::NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).unwrap(); + + assert_eq!(db.num_columns(), super::columns::v3::NUM_COLUMNS); + } } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 68e953b9dade..80e231e654d6 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -642,6 +642,24 @@ pub enum RuntimeApiRequest { ), /// Returns all on-chain disputes at given block number. Available in `v3`. Disputes(RuntimeApiSender)>>), + /// Returns a list of validators that lost a past session dispute and need to be slashed. + /// `VStaging` + UnappliedSlashes( + RuntimeApiSender>, + ), + /// Returns a merkle proof of a validator session key. + /// `VStaging` + KeyOwnershipProof( + ValidatorId, + RuntimeApiSender>, + ), + /// Submits an unsigned extrinsic to slash validator who lost a past session dispute. + /// `VStaging` + SubmitReportDisputeLost( + vstaging_primitives::slashing::DisputeProof, + vstaging_primitives::slashing::OpaqueKeyOwnershipProof, + RuntimeApiSender>, + ), /// Get the backing state of the given para. /// This is a staging API that will not be available on production runtimes. StagingParaBackingState(ParaId, RuntimeApiSender>), @@ -657,9 +675,18 @@ impl RuntimeApiRequest { /// `Disputes` pub const DISPUTES_RUNTIME_REQUIREMENT: u32 = 3; + /// `UnappliedSlashes` + pub const UNAPPLIED_SLASHES_RUNTIME_REQUIREMENT: u32 = 4; + /// `ExecutorParams` pub const EXECUTOR_PARAMS_RUNTIME_REQUIREMENT: u32 = 4; + /// `KeyOwnershipProof` + pub const KEY_OWNERSHIP_PROOF_RUNTIME_REQUIREMENT: u32 = 4; + + /// `SubmitReportDisputeLost` + pub const SUBMIT_REPORT_DISPUTE_LOST_RUNTIME_REQUIREMENT: u32 = 4; + /// Minimum version for backing state, required for async backing. /// /// 99 for now, should be adjusted to VSTAGING/actual runtime version once released. 
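Taken together, the three new request variants are intended to be driven as one flow on the node side: enumerate the unapplied slashes, fetch a key ownership proof for an offending validator, and then submit the unsigned report. Below is a minimal sketch of that flow, following the oneshot request pattern used elsewhere in this codebase (the surrounding subsystem context, the construction of the `DisputeProof`, and real error handling are assumed or elided; all names are illustrative):

```rust
use futures::channel::oneshot;
use polkadot_node_subsystem::{
	messages::{RuntimeApiMessage, RuntimeApiRequest},
	overseer,
};
use polkadot_primitives::{vstaging, Hash, ValidatorId};

/// Illustrative only: query pending slashes at `relay_parent` and report one of them.
async fn report_dispute_slash<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
	validator_id: ValidatorId,
	dispute_proof: vstaging::slashing::DisputeProof,
) where
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	// 1. Which validators still need to be slashed for disputes they lost?
	let (tx, rx) = oneshot::channel();
	sender
		.send_message(RuntimeApiMessage::Request(
			relay_parent,
			RuntimeApiRequest::UnappliedSlashes(tx),
		))
		.await;
	let _pending = rx.await; // requires runtime API v4 (`UNAPPLIED_SLASHES_RUNTIME_REQUIREMENT`)

	// 2. Prove that `validator_id` owned its session key in the offending session.
	let (tx, rx) = oneshot::channel();
	sender
		.send_message(RuntimeApiMessage::Request(
			relay_parent,
			RuntimeApiRequest::KeyOwnershipProof(validator_id, tx),
		))
		.await;
	let key_ownership_proof = match rx.await {
		Ok(Ok(Some(proof))) => proof,
		_ => return, // proof unavailable, runtime too old, or channel closed
	};

	// 3. Submit the unsigned extrinsic that actually applies the slash.
	let (tx, rx) = oneshot::channel();
	sender
		.send_message(RuntimeApiMessage::Request(
			relay_parent,
			RuntimeApiRequest::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, tx),
		))
		.await;
	let _submitted = rx.await; // `Ok(Ok(Some(())))` once the report was accepted
}
```

The `RuntimeApiSubsystemClient` methods added in the next file forward these same three calls to the runtime's staging API.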
diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs index 4f4d3344c43e..9f78a6f1d0ef 100644 --- a/node/subsystem-types/src/runtime_client.rs +++ b/node/subsystem-types/src/runtime_client.rs @@ -16,7 +16,7 @@ use async_trait::async_trait; use polkadot_primitives::{ - runtime_api::ParachainHost, Block, BlockNumber, CandidateCommitments, CandidateEvent, + runtime_api::ParachainHost, vstaging, Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, @@ -182,6 +182,34 @@ pub trait RuntimeApiSubsystemClient { at: Hash, ) -> Result)>, ApiError>; + /// Returns a list of validators that lost a past session dispute and need to be slashed. + /// + /// WARNING: This is a staging method! Do not use on production runtimes! + async fn unapplied_slashes( + &self, + at: Hash, + ) -> Result, ApiError>; + + /// Returns a merkle proof of a validator session key in a past session. + /// + /// WARNING: This is a staging method! Do not use on production runtimes! + async fn key_ownership_proof( + &self, + at: Hash, + validator_id: ValidatorId, + ) -> Result, ApiError>; + + /// Submits an unsigned extrinsic to slash validators who lost a dispute about + /// a candidate of a past session. + /// + /// WARNING: This is a staging method! Do not use on production runtimes! + async fn submit_report_dispute_lost( + &self, + at: Hash, + dispute_proof: vstaging::slashing::DisputeProof, + key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + ) -> Result, ApiError>; + /// Returns the state of parachain backing for a given para. /// This is a staging method! Do not use on production runtimes! async fn staging_para_backing_state( @@ -391,6 +419,31 @@ where self.runtime_api().disputes(at) } + async fn unapplied_slashes( + &self, + at: Hash, + ) -> Result, ApiError> { + self.runtime_api().unapplied_slashes(at) + } + + async fn key_ownership_proof( + &self, + at: Hash, + validator_id: ValidatorId, + ) -> Result, ApiError> { + self.runtime_api().key_ownership_proof(at, validator_id) + } + + async fn submit_report_dispute_lost( + &self, + at: Hash, + dispute_proof: vstaging::slashing::DisputeProof, + key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + ) -> Result, ApiError> { + self.runtime_api() + .submit_report_dispute_lost(at, dispute_proof, key_ownership_proof) + } + async fn staging_para_backing_state( &self, at: Hash, diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 616b4f01fd5f..6f9a803efb9d 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -72,8 +72,6 @@ pub mod backing_implicit_view; /// An emulator for node-side code to predict the results of on-chain parachain inclusion /// and predict future constraints. pub mod inclusion_emulator; -/// A rolling session window cache. -pub mod rolling_session_window; /// Convenient and efficient runtime info access. pub mod runtime; diff --git a/node/subsystem-util/src/rolling_session_window.rs b/node/subsystem-util/src/rolling_session_window.rs deleted file mode 100644 index 18364491849a..000000000000 --- a/node/subsystem-util/src/rolling_session_window.rs +++ /dev/null @@ -1,1532 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! A rolling window of sessions and cached session info, updated by the state of newly imported blocks. -//! -//! This is useful for consensus components which need to stay up-to-date about recent sessions but don't -//! care about the state of particular blocks. - -use super::database::{DBTransaction, Database}; -use kvdb::{DBKey, DBOp}; - -use parity_scale_codec::{Decode, Encode}; -pub use polkadot_node_primitives::{new_session_window_size, SessionWindowSize}; -use polkadot_primitives::{BlockNumber, Hash, SessionIndex, SessionInfo}; -use std::sync::Arc; - -use futures::channel::oneshot; -use polkadot_node_subsystem::{ - errors::{ChainApiError, RuntimeApiError}, - messages::{ChainApiMessage, RuntimeApiMessage, RuntimeApiRequest}, - overseer, -}; - -// The window size is equal to the `approval-voting` and `dispute-coordinator` constants that -// have been obsoleted. -const SESSION_WINDOW_SIZE: SessionWindowSize = new_session_window_size!(6); -const LOG_TARGET: &str = "parachain::rolling-session-window"; -const STORED_ROLLING_SESSION_WINDOW: &[u8] = b"Rolling_session_window"; - -/// Sessions unavailable in state to cache. -#[derive(Debug, Clone, thiserror::Error)] -pub enum SessionsUnavailableReason { - /// Runtime API subsystem was unavailable. - #[error(transparent)] - RuntimeApiUnavailable(#[from] oneshot::Canceled), - /// The runtime API itself returned an error. - #[error(transparent)] - RuntimeApi(#[from] RuntimeApiError), - /// The chain API itself returned an error. - #[error(transparent)] - ChainApi(#[from] ChainApiError), - /// Missing session info from runtime API for given `SessionIndex`. - #[error("Missing session index {0:?}")] - Missing(SessionIndex), - /// Missing last finalized block number. - #[error("Missing last finalized block number")] - MissingLastFinalizedBlock, - /// Missing last finalized block hash. - #[error("Missing last finalized block hash")] - MissingLastFinalizedBlockHash(BlockNumber), -} - -/// Information about the sessions being fetched. -#[derive(Debug, Clone)] -pub struct SessionsUnavailableInfo { - /// The desired window start. - pub window_start: SessionIndex, - /// The desired window end. - pub window_end: SessionIndex, - /// The block hash whose state the sessions were meant to be drawn from. - pub block_hash: Hash, -} - -/// Sessions were unavailable to fetch from the state for some reason. -#[derive(Debug, thiserror::Error, Clone)] -#[error("Sessions unavailable: {kind:?}, info: {info:?}")] -pub struct SessionsUnavailable { - /// The error kind. - #[source] - kind: SessionsUnavailableReason, - /// The info about the session window, if any. - info: Option, -} - -/// An indicated update of the rolling session window. -#[derive(Debug, PartialEq, Clone)] -pub enum SessionWindowUpdate { - /// The session window was just advanced from one range to a new one. - Advanced { - /// The previous start of the window (inclusive). 
- prev_window_start: SessionIndex, - /// The previous end of the window (inclusive). - prev_window_end: SessionIndex, - /// The new start of the window (inclusive). - new_window_start: SessionIndex, - /// The new end of the window (inclusive). - new_window_end: SessionIndex, - }, - /// The session window was unchanged. - Unchanged, -} - -/// A structure to store rolling session database parameters. -#[derive(Clone)] -pub struct DatabaseParams { - /// Database reference. - pub db: Arc, - /// The column which stores the rolling session info. - pub db_column: u32, -} -/// A rolling window of sessions and cached session info. -pub struct RollingSessionWindow { - earliest_session: SessionIndex, - session_info: Vec, - window_size: SessionWindowSize, - // The option is just to enable some approval-voting tests to force feed sessions - // in the window without dealing with the DB. - db_params: Option, -} - -/// The rolling session data we persist in the database. -#[derive(Encode, Decode, Default)] -struct StoredWindow { - earliest_session: SessionIndex, - session_info: Vec, -} - -impl RollingSessionWindow { - /// Initialize a new session info cache with the given window size. - /// Invariant: The database always contains the earliest session. Then, - /// we can always extend the session info vector using chain state. - pub async fn new( - mut sender: Sender, - block_hash: Hash, - db_params: DatabaseParams, - ) -> Result - where - Sender: overseer::SubsystemSender - + overseer::SubsystemSender, - { - // At first, determine session window start using the chain state. - let session_index = get_session_index_for_child(&mut sender, block_hash).await?; - let earliest_non_finalized_block_session = - Self::earliest_non_finalized_block_session(&mut sender).await?; - - // This will increase the session window to cover the full unfinalized chain. - let on_chain_window_start = std::cmp::min( - session_index.saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - earliest_non_finalized_block_session, - ); - - // Fetch session information from DB. - let maybe_stored_window = Self::db_load(db_params.clone()); - - // Get the DB stored sessions and recompute window start based on DB data. - let (mut window_start, stored_sessions) = - if let Some(mut stored_window) = maybe_stored_window { - // Check if DB is ancient. - if earliest_non_finalized_block_session > - stored_window.earliest_session + stored_window.session_info.len() as u32 - { - // If ancient, we scrap it and fetch from chain state. - stored_window.session_info.clear(); - } - - // The session window might extend beyond the last finalized block, but that's fine as we'll prune it at - // next update. - let window_start = if stored_window.session_info.len() > 0 { - // If there is at least one entry in db, we always take the DB as source of truth. - stored_window.earliest_session - } else { - on_chain_window_start - }; - - (window_start, stored_window.session_info) - } else { - (on_chain_window_start, Vec::new()) - }; - - // Compute the amount of sessions missing from the window that will be fetched from chain state. - let sessions_missing_count = session_index - .saturating_sub(window_start) - .saturating_add(1) - .saturating_sub(stored_sessions.len() as u32); - - // Extend from chain state. 
- let sessions = if sessions_missing_count > 0 { - match extend_sessions_from_chain_state( - stored_sessions, - &mut sender, - block_hash, - &mut window_start, - session_index, - ) - .await - { - Err(kind) => Err(SessionsUnavailable { - kind, - info: Some(SessionsUnavailableInfo { - window_start, - window_end: session_index, - block_hash, - }), - }), - Ok(sessions) => Ok(sessions), - }? - } else { - // There are no new sessions to be fetched from chain state. - stored_sessions - }; - - Ok(Self { - earliest_session: window_start, - session_info: sessions, - window_size: SESSION_WINDOW_SIZE, - db_params: Some(db_params), - }) - } - - // Load session information from the parachains db. - fn db_load(db_params: DatabaseParams) -> Option { - match db_params.db.get(db_params.db_column, STORED_ROLLING_SESSION_WINDOW).ok()? { - None => None, - Some(raw) => { - let maybe_decoded = StoredWindow::decode(&mut &raw[..]).map(Some); - match maybe_decoded { - Ok(decoded) => decoded, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?err, - "Failed decoding db entry; will start with onchain session infos and self-heal DB entry on next update." - ); - None - }, - } - }, - } - } - - // Saves/Updates all sessions in the database. - // TODO: https://github.com/paritytech/polkadot/issues/6144 - fn db_save(&mut self, stored_window: StoredWindow) { - if let Some(db_params) = self.db_params.as_ref() { - match db_params.db.write(DBTransaction { - ops: vec![DBOp::Insert { - col: db_params.db_column, - key: DBKey::from_slice(STORED_ROLLING_SESSION_WINDOW), - value: stored_window.encode(), - }], - }) { - Ok(_) => {}, - Err(err) => { - gum::warn!(target: LOG_TARGET, ?err, "Failed writing db entry"); - }, - } - } - } - - /// Initialize a new session info cache with the given window size and - /// initial data. - /// This is only used in `approval voting` tests. - pub fn with_session_info( - earliest_session: SessionIndex, - session_info: Vec, - ) -> Self { - RollingSessionWindow { - earliest_session, - session_info, - window_size: SESSION_WINDOW_SIZE, - db_params: None, - } - } - - /// Access the session info for the given session index, if stored within the window. - pub fn session_info(&self, index: SessionIndex) -> Option<&SessionInfo> { - if index < self.earliest_session { - None - } else { - self.session_info.get((index - self.earliest_session) as usize) - } - } - - /// Access the index of the earliest session. - pub fn earliest_session(&self) -> SessionIndex { - self.earliest_session - } - - /// Access the index of the latest session. - pub fn latest_session(&self) -> SessionIndex { - self.earliest_session + (self.session_info.len() as SessionIndex).saturating_sub(1) - } - - /// Returns `true` if `session_index` is contained in the window. 
- pub fn contains(&self, session_index: SessionIndex) -> bool { - session_index >= self.earliest_session() && session_index <= self.latest_session() - } - - async fn earliest_non_finalized_block_session( - sender: &mut Sender, - ) -> Result - where - Sender: overseer::SubsystemSender - + overseer::SubsystemSender, - { - let last_finalized_height = { - let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::FinalizedBlockNumber(tx)).await; - match rx.await { - Ok(Ok(number)) => number, - Ok(Err(e)) => - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::ChainApi(e), - info: None, - }), - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?err, - "Failed fetching last finalized block number" - ); - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::MissingLastFinalizedBlock, - info: None, - }) - }, - } - }; - - let (tx, rx) = oneshot::channel(); - // We want to get the session index for the child of the last finalized block. - sender - .send_message(ChainApiMessage::FinalizedBlockHash(last_finalized_height, tx)) - .await; - let last_finalized_hash_parent = match rx.await { - Ok(Ok(maybe_hash)) => maybe_hash, - Ok(Err(e)) => - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::ChainApi(e), - info: None, - }), - Err(err) => { - gum::warn!(target: LOG_TARGET, ?err, "Failed fetching last finalized block hash"); - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::MissingLastFinalizedBlockHash( - last_finalized_height, - ), - info: None, - }) - }, - }; - - // Get the session in which the last finalized block was authored. - if let Some(last_finalized_hash_parent) = last_finalized_hash_parent { - let session = - match get_session_index_for_child(sender, last_finalized_hash_parent).await { - Ok(session_index) => session_index, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?err, - ?last_finalized_hash_parent, - "Failed fetching session index" - ); - return Err(err) - }, - }; - - Ok(session) - } else { - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::MissingLastFinalizedBlockHash( - last_finalized_height, - ), - info: None, - }) - } - } - - /// When inspecting a new import notification, updates the session info cache to match - /// the session of the imported block's child. - /// - /// this only needs to be called on heads where we are directly notified about import, as sessions do - /// not change often and import notifications are expected to be typically increasing in session number. - /// - /// some backwards drift in session index is acceptable. - pub async fn cache_session_info_for_head( - &mut self, - sender: &mut Sender, - block_hash: Hash, - ) -> Result - where - Sender: overseer::SubsystemSender - + overseer::SubsystemSender, - { - let session_index = get_session_index_for_child(sender, block_hash).await?; - let latest = self.latest_session(); - - // Either cached or ancient. - if session_index <= latest { - return Ok(SessionWindowUpdate::Unchanged) - } - - let earliest_non_finalized_block_session = - Self::earliest_non_finalized_block_session(sender).await?; - - let old_window_start = self.earliest_session; - let old_window_end = latest; - - // Ensure we keep sessions up to last finalized block by adjusting the window start. - // This will increase the session window to cover the full unfinalized chain. 
- let window_start = std::cmp::min( - session_index.saturating_sub(self.window_size.get() - 1), - earliest_non_finalized_block_session, - ); - - // Never look back past earliest session, since if sessions beyond were not needed or available - // in the past remains valid for the future (window only advances forward). - let mut window_start = std::cmp::max(window_start, self.earliest_session); - - let mut sessions = self.session_info.clone(); - let sessions_out_of_window = window_start.saturating_sub(old_window_start) as usize; - - let sessions = if sessions_out_of_window < sessions.len() { - // Drop sessions based on how much the window advanced. - sessions.split_off((window_start as usize).saturating_sub(old_window_start as usize)) - } else { - // Window has jumped such that we need to fetch all sessions from on chain. - Vec::new() - }; - - match extend_sessions_from_chain_state( - sessions, - sender, - block_hash, - &mut window_start, - session_index, - ) - .await - { - Err(kind) => Err(SessionsUnavailable { - kind, - info: Some(SessionsUnavailableInfo { - window_start, - window_end: session_index, - block_hash, - }), - }), - Ok(s) => { - let update = SessionWindowUpdate::Advanced { - prev_window_start: old_window_start, - prev_window_end: old_window_end, - new_window_start: window_start, - new_window_end: session_index, - }; - - self.session_info = s; - - // we need to account for this case: - // window_start ................................... session_index - // old_window_start ........... latest - let new_earliest = std::cmp::max(window_start, old_window_start); - self.earliest_session = new_earliest; - - // Update current window in DB. - self.db_save(StoredWindow { - earliest_session: self.earliest_session, - session_info: self.session_info.clone(), - }); - Ok(update) - }, - } - } -} - -// Returns the session index expected at any child of the `parent` block. -// -// Note: We could use `RuntimeInfo::get_session_index_for_child` here but it's -// cleaner to just call the runtime API directly without needing to create an instance -// of `RuntimeInfo`. -async fn get_session_index_for_child( - sender: &mut impl overseer::SubsystemSender, - block_hash: Hash, -) -> Result { - let (s_tx, s_rx) = oneshot::channel(); - - // We're requesting session index of a child to populate the cache in advance. - sender - .send_message(RuntimeApiMessage::Request( - block_hash, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) - .await; - - match s_rx.await { - Ok(Ok(s)) => Ok(s), - Ok(Err(e)) => - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::RuntimeApi(e), - info: None, - }), - Err(e) => - return Err(SessionsUnavailable { - kind: SessionsUnavailableReason::RuntimeApiUnavailable(e), - info: None, - }), - } -} - -/// Attempts to extend db stored sessions with sessions missing between `start` and up to `end_inclusive`. -/// Runtime session info fetching errors are ignored if that doesn't create a gap in the window. -async fn extend_sessions_from_chain_state( - stored_sessions: Vec, - sender: &mut impl overseer::SubsystemSender, - block_hash: Hash, - window_start: &mut SessionIndex, - end_inclusive: SessionIndex, -) -> Result, SessionsUnavailableReason> { - // Start from the db sessions. - let mut sessions = stored_sessions; - // We allow session fetch failures only if we won't create a gap in the window by doing so. - // If `allow_failure` is set to true here, fetching errors are ignored until we get a first session. 
- let mut allow_failure = sessions.is_empty(); - - let start = *window_start + sessions.len() as u32; - - for i in start..=end_inclusive { - let (tx, rx) = oneshot::channel(); - sender - .send_message(RuntimeApiMessage::Request( - block_hash, - RuntimeApiRequest::SessionInfo(i, tx), - )) - .await; - - match rx.await { - Ok(Ok(Some(session_info))) => { - // We do not allow failure anymore after having at least 1 session in window. - allow_failure = false; - sessions.push(session_info); - }, - Ok(Ok(None)) if !allow_failure => return Err(SessionsUnavailableReason::Missing(i)), - Ok(Ok(None)) => { - // Handle `allow_failure` true. - // If we didn't get the session, we advance window start. - *window_start += 1; - gum::debug!( - target: LOG_TARGET, - session = ?i, - "Session info missing from runtime." - ); - }, - Ok(Err(e)) if !allow_failure => return Err(SessionsUnavailableReason::RuntimeApi(e)), - Err(canceled) if !allow_failure => - return Err(SessionsUnavailableReason::RuntimeApiUnavailable(canceled)), - Ok(Err(err)) => { - // Handle `allow_failure` true. - // If we didn't get the session, we advance window start. - *window_start += 1; - gum::debug!( - target: LOG_TARGET, - session = ?i, - ?err, - "Error while fetching session information." - ); - }, - Err(err) => { - // Handle `allow_failure` true. - // If we didn't get the session, we advance window start. - *window_start += 1; - gum::debug!( - target: LOG_TARGET, - session = ?i, - ?err, - "Channel error while fetching session information." - ); - }, - }; - } - - Ok(sessions) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::database::kvdb_impl::DbAdapter; - use assert_matches::assert_matches; - use polkadot_node_subsystem::{ - messages::{AllMessages, AvailabilityRecoveryMessage}, - SubsystemContext, - }; - use polkadot_node_subsystem_test_helpers::make_subsystem_context; - use polkadot_primitives::Header; - use sp_core::testing::TaskExecutor; - - const SESSION_DATA_COL: u32 = 0; - - const NUM_COLUMNS: u32 = 1; - - fn dummy_db_params() -> DatabaseParams { - let db = kvdb_memorydb::create(NUM_COLUMNS); - let db = DbAdapter::new(db, &[]); - let db: Arc = Arc::new(db); - DatabaseParams { db, db_column: SESSION_DATA_COL } - } - - fn dummy_session_info(index: SessionIndex) -> SessionInfo { - SessionInfo { - validators: Default::default(), - discovery_keys: Vec::new(), - assignment_keys: Vec::new(), - validator_groups: Default::default(), - n_cores: index as _, - zeroth_delay_tranche_width: index as _, - relay_vrf_modulo_samples: index as _, - n_delay_tranches: index as _, - no_show_slots: index as _, - needed_approvals: index as _, - active_validator_indices: Vec::new(), - dispute_period: 6, - random_seed: [0u8; 32], - } - } - - fn cache_session_info_test( - expected_start_session: SessionIndex, - session: SessionIndex, - window: Option, - expect_requests_from: SessionIndex, - db_params: Option, - ) -> RollingSessionWindow { - let db_params = db_params.unwrap_or(dummy_db_params()); - - let header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 5, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let finalized_header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 0, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = - make_subsystem_context::(pool.clone()); - - let hash = header.hash(); - - let sender = ctx.sender(); - - let test_fut 
= { - Box::pin(async move { - let window = match window { - None => - RollingSessionWindow::new(sender.clone(), hash, db_params).await.unwrap(), - Some(mut window) => { - window.cache_session_info_for_head(sender, hash).await.unwrap(); - window - }, - }; - assert_eq!(window.earliest_session, expected_start_session); - assert_eq!( - window.session_info, - (expected_start_session..=session).map(dummy_session_info).collect::>(), - ); - - window - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(finalized_header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, finalized_header.number); - let _ = s_tx.send(Ok(Some(finalized_header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, finalized_header.hash()); - let _ = s_tx.send(Ok(session)); - } - ); - - for i in expect_requests_from..=session { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(i, j); - let _ = s_tx.send(Ok(Some(dummy_session_info(i)))); - } - ); - } - }); - - let (window, _) = futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - window - } - - #[test] - fn cache_session_info_start_empty_db() { - let db_params = dummy_db_params(); - - let window = cache_session_info_test( - (10 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 10, - None, - (10 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - Some(db_params.clone()), - ); - - let window = cache_session_info_test( - (11 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 11, - Some(window), - 11, - None, - ); - assert_eq!(window.session_info.len(), SESSION_WINDOW_SIZE.get() as usize); - - cache_session_info_test( - (11 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 12, - None, - 12, - Some(db_params), - ); - } - - #[test] - fn cache_session_info_first_early() { - cache_session_info_test(0, 1, None, 0, None); - } - - #[test] - fn cache_session_info_does_not_underflow() { - let window = RollingSessionWindow { - earliest_session: 1, - session_info: vec![dummy_session_info(1)], - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - cache_session_info_test(1, 2, Some(window), 2, None); - } - - #[test] - fn cache_session_window_contains() { - let window = RollingSessionWindow { - earliest_session: 10, - session_info: vec![dummy_session_info(1)], - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - assert!(!window.contains(0)); - assert!(!window.contains(10 + SESSION_WINDOW_SIZE.get())); - assert!(!window.contains(11)); - assert!(!window.contains(10 + SESSION_WINDOW_SIZE.get() - 1)); - } - - #[test] - fn cache_session_info_first_late() { - cache_session_info_test( - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 100, - None, - (100 as 
SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - None, - ); - } - - #[test] - fn cache_session_info_jump() { - let window = RollingSessionWindow { - earliest_session: 50, - session_info: vec![ - dummy_session_info(50), - dummy_session_info(51), - dummy_session_info(52), - ], - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - cache_session_info_test( - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 100, - Some(window), - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - None, - ); - } - - #[test] - fn cache_session_info_roll_full() { - let start = 99 - (SESSION_WINDOW_SIZE.get() - 1); - let window = RollingSessionWindow { - earliest_session: start, - session_info: (start..=99).map(dummy_session_info).collect(), - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - cache_session_info_test( - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 100, - Some(window), - 100, // should only make one request. - None, - ); - } - - #[test] - fn cache_session_info_roll_many_full_db() { - let db_params = dummy_db_params(); - let start = 97 - (SESSION_WINDOW_SIZE.get() - 1); - let window = RollingSessionWindow { - earliest_session: start, - session_info: (start..=97).map(dummy_session_info).collect(), - window_size: SESSION_WINDOW_SIZE, - db_params: Some(db_params.clone()), - }; - - cache_session_info_test( - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 100, - Some(window), - 98, - None, - ); - - // We expect the session to be populated from DB, and only fetch 101 from on chain. - cache_session_info_test( - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 101, - None, - 101, - Some(db_params.clone()), - ); - - // Session warps in the future. - let window = cache_session_info_test(195, 200, None, 195, Some(db_params)); - - assert_eq!(window.session_info.len(), SESSION_WINDOW_SIZE.get() as usize); - } - - #[test] - fn cache_session_info_roll_many_full() { - let start = 97 - (SESSION_WINDOW_SIZE.get() - 1); - let window = RollingSessionWindow { - earliest_session: start, - session_info: (start..=97).map(dummy_session_info).collect(), - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - cache_session_info_test( - (100 as SessionIndex).saturating_sub(SESSION_WINDOW_SIZE.get() - 1), - 100, - Some(window), - 98, - None, - ); - } - - #[test] - fn cache_session_info_roll_early() { - let start = 0; - let window = RollingSessionWindow { - earliest_session: start, - session_info: (0..=1).map(dummy_session_info).collect(), - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - cache_session_info_test( - 0, - 2, - Some(window), - 2, // should only make one request. - None, - ); - } - - #[test] - fn cache_session_info_roll_many_early() { - let start = 0; - let window = RollingSessionWindow { - earliest_session: start, - session_info: (0..=1).map(dummy_session_info).collect(), - window_size: SESSION_WINDOW_SIZE, - db_params: Some(dummy_db_params()), - }; - - let actual_window_size = window.session_info.len() as u32; - - cache_session_info_test(0, 3, Some(window), actual_window_size, None); - } - - #[test] - fn db_load_works() { - // Session index of the tip of our fake test chain. 
- let session: SessionIndex = 100; - let genesis_session: SessionIndex = 0; - - let header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 5, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let finalized_header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 0, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let finalized_header_clone = finalized_header.clone(); - - let hash: sp_core::H256 = header.hash(); - let db_params = dummy_db_params(); - let db_params_clone = db_params.clone(); - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); - - let test_fut = { - let sender = ctx.sender().clone(); - Box::pin(async move { - let mut rsw = - RollingSessionWindow::new(sender.clone(), hash, db_params_clone).await.unwrap(); - - let session_info = rsw.session_info.clone(); - let earliest_session = rsw.earliest_session(); - - assert_eq!(earliest_session, 0); - assert_eq!(session_info.len(), 101); - - rsw.db_save(StoredWindow { earliest_session, session_info }); - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(finalized_header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, finalized_header.number); - let _ = s_tx.send(Ok(Some(finalized_header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, finalized_header.hash()); - let _ = s_tx.send(Ok(0)); - } - ); - - // Unfinalized chain starts at geneisis block, so session 0 is how far we stretch. 
- for i in genesis_session..=session { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(i, j); - let _ = s_tx.send(Ok(Some(dummy_session_info(i)))); - } - ); - } - }); - - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); - - let test_fut = { - Box::pin(async move { - let sender = ctx.sender().clone(); - let res = RollingSessionWindow::new(sender, hash, db_params).await; - let rsw = res.unwrap(); - assert_eq!(rsw.earliest_session, 0); - assert_eq!(rsw.session_info.len(), 101); - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(finalized_header_clone.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, finalized_header_clone.number); - let _ = s_tx.send(Ok(Some(finalized_header_clone.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, finalized_header_clone.hash()); - let _ = s_tx.send(Ok(0)); - } - ); - }); - - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - } - - #[test] - fn cache_session_fails_for_gap_in_window() { - // Session index of the tip of our fake test chain. 
- let session: SessionIndex = 100; - let genesis_session: SessionIndex = 0; - - let header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 5, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let finalized_header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 0, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); - - let hash = header.hash(); - - let test_fut = { - let sender = ctx.sender().clone(); - Box::pin(async move { - let res = RollingSessionWindow::new(sender, hash, dummy_db_params()).await; - - assert!(res.is_err()); - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(finalized_header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, finalized_header.number); - let _ = s_tx.send(Ok(Some(finalized_header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, finalized_header.hash()); - let _ = s_tx.send(Ok(0)); - } - ); - - // Unfinalized chain starts at geneisis block, so session 0 is how far we stretch. - // First 50 sessions are missing. - for i in genesis_session..=50 { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(i, j); - let _ = s_tx.send(Ok(None)); - } - ); - } - // next 10 sessions are present - for i in 51..=60 { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(i, j); - let _ = s_tx.send(Ok(Some(dummy_session_info(i)))); - } - ); - } - // gap of 1 session - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(61, j); - let _ = s_tx.send(Ok(None)); - } - ); - }); - - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - } - - #[test] - fn any_session_stretch_with_failure_allowed_for_unfinalized_chain() { - // Session index of the tip of our fake test chain. 
- let session: SessionIndex = 100; - let genesis_session: SessionIndex = 0; - - let header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 5, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let finalized_header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 0, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); - - let hash = header.hash(); - - let test_fut = { - let sender = ctx.sender().clone(); - Box::pin(async move { - let res = RollingSessionWindow::new(sender, hash, dummy_db_params()).await; - assert!(res.is_ok()); - let rsw = res.unwrap(); - // Since first 50 sessions are missing the earliest should be 50. - assert_eq!(rsw.earliest_session, 50); - assert_eq!(rsw.session_info.len(), 51); - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(finalized_header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, finalized_header.number); - let _ = s_tx.send(Ok(Some(finalized_header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, finalized_header.hash()); - let _ = s_tx.send(Ok(0)); - } - ); - - // Unfinalized chain starts at geneisis block, so session 0 is how far we stretch. - // We also test if failure is allowed for 50 first missing sessions. 
- for i in genesis_session..=session { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(i, j); - - let _ = s_tx.send(Ok(if i < 50 { - None - } else { - Some(dummy_session_info(i)) - })); - } - ); - } - }); - - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - } - - #[test] - fn any_session_unavailable_for_caching_means_no_change() { - let session: SessionIndex = 6; - let start_session = session.saturating_sub(SESSION_WINDOW_SIZE.get() - 1); - - let header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 5, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let finalized_header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 0, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); - - let hash = header.hash(); - - let test_fut = { - let sender = ctx.sender().clone(); - Box::pin(async move { - let res = RollingSessionWindow::new(sender, hash, dummy_db_params()).await; - assert!(res.is_err()); - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(finalized_header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, finalized_header.number); - let _ = s_tx.send(Ok(Some(finalized_header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, finalized_header.hash()); - let _ = s_tx.send(Ok(session)); - } - ); - - for i in start_session..=session { - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(j, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(i, j); - - let _ = s_tx.send(Ok(if i == session { - None - } else { - Some(dummy_session_info(i)) - })); - } - ); - } - }); - - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - } - - #[test] - fn request_session_info_for_genesis() { - let session: SessionIndex = 0; - - let header = Header { - digest: Default::default(), - extrinsics_root: Default::default(), - number: 0, - state_root: Default::default(), - parent_hash: Default::default(), - }; - - let pool = TaskExecutor::new(); - let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); - - let hash = header.hash(); - - let test_fut = { - Box::pin(async move { - let sender = ctx.sender().clone(); - let window = - RollingSessionWindow::new(sender, hash, dummy_db_params()).await.unwrap(); - - assert_eq!(window.earliest_session, session); - assert_eq!(window.session_info, vec![dummy_session_info(session)]); - }) - }; - - let aux_fut = Box::pin(async move { - assert_matches!( - handle.recv().await, - 
AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, hash); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber( - s_tx, - )) => { - let _ = s_tx.send(Ok(header.number)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash( - block_number, - s_tx, - )) => { - assert_eq!(block_number, header.number); - let _ = s_tx.send(Ok(Some(header.hash()))); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionIndexForChild(s_tx), - )) => { - assert_eq!(h, header.hash()); - let _ = s_tx.send(Ok(session)); - } - ); - - assert_matches!( - handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - h, - RuntimeApiRequest::SessionInfo(s, s_tx), - )) => { - assert_eq!(h, hash); - assert_eq!(s, session); - - let _ = s_tx.send(Ok(Some(dummy_session_info(s)))); - } - ); - }); - - futures::executor::block_on(futures::future::join(test_fut, aux_fut)); - } -} diff --git a/node/test/performance-test/Cargo.toml b/node/test/performance-test/Cargo.toml index 70f072c03ae1..0e5800b920e3 100644 --- a/node/test/performance-test/Cargo.toml +++ b/node/test/performance-test/Cargo.toml @@ -6,11 +6,11 @@ edition.workspace = true [dependencies] thiserror = "1.0.31" -quote = "1.0.26" +quote = "1.0.28" env_logger = "0.9" log = "0.4" -polkadot-node-core-pvf-worker = { path = "../../core/pvf/worker" } +polkadot-node-core-pvf-prepare-worker = { path = "../../core/pvf/prepare-worker" } polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-node-primitives = { path = "../../primitives" } polkadot-primitives = { path = "../../../primitives" } diff --git a/node/test/performance-test/src/lib.rs b/node/test/performance-test/src/lib.rs index 1afa43cc62ba..15073912654a 100644 --- a/node/test/performance-test/src/lib.rs +++ b/node/test/performance-test/src/lib.rs @@ -65,9 +65,9 @@ pub fn measure_pvf_prepare(wasm_code: &[u8]) -> Result .or(Err(PerfCheckError::CodeDecompressionFailed))?; // Recreate the pipeline from the pvf prepare worker. - let blob = - polkadot_node_core_pvf_worker::prevalidate(code.as_ref()).map_err(PerfCheckError::from)?; - polkadot_node_core_pvf_worker::prepare(blob, &ExecutorParams::default()) + let blob = polkadot_node_core_pvf_prepare_worker::prevalidate(code.as_ref()) + .map_err(PerfCheckError::from)?; + polkadot_node_core_pvf_prepare_worker::prepare(blob, &ExecutorParams::default()) .map_err(PerfCheckError::from)?; Ok(start.elapsed()) diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml index afd192257636..7913d5462338 100644 --- a/parachain/Cargo.toml +++ b/parachain/Cargo.toml @@ -17,10 +17,10 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } polkadot-core-primitives = { path = "../core-primitives", default-features = false } derive_more = "0.99.11" -bounded-collections = { version = "0.1.5", default-features = false } +bounded-collections = { version = "0.1.7", default-features = false } # all optional crates. 
-serde = { version = "1.0.137", default-features = false, features = [ "derive" ], optional = true } +serde = { version = "1.0.163", default-features = false, features = [ "derive" ], optional = true } [features] default = ["std"] diff --git a/parachain/test-parachains/adder/build.rs b/parachain/test-parachains/adder/build.rs index d6377f3d5a5a..6bb34ead1b86 100644 --- a/parachain/test-parachains/adder/build.rs +++ b/parachain/test-parachains/adder/build.rs @@ -17,5 +17,9 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new().with_current_project().export_heap_base().build() + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .disable_runtime_version_section_check() + .build() } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 081dfd938942..c9454787ef1c 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -34,7 +34,7 @@ sc-service = { git = "https://github.com/paritytech/substrate", branch = "master # This one is tricky. Even though it is not used directly by the collator, we still need it for the # `puppet_worker` binary, which is required for the integration test. However, this shouldn't be # a big problem since it is used transitively anyway. -polkadot-node-core-pvf-worker = { path = "../../../../node/core/pvf/worker" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf" } [dev-dependencies] polkadot-parachain = { path = "../../.." } diff --git a/parachain/test-parachains/adder/collator/bin/puppet_worker.rs b/parachain/test-parachains/adder/collator/bin/puppet_worker.rs index ddd81971292b..7f93519d8454 100644 --- a/parachain/test-parachains/adder/collator/bin/puppet_worker.rs +++ b/parachain/test-parachains/adder/collator/bin/puppet_worker.rs @@ -14,4 +14,4 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-polkadot_node_core_pvf_worker::decl_puppet_worker_main!(); +polkadot_node_core_pvf::decl_puppet_worker_main!(); diff --git a/parachain/test-parachains/adder/collator/src/lib.rs b/parachain/test-parachains/adder/collator/src/lib.rs index 4b2b9248de22..02a4598f9e47 100644 --- a/parachain/test-parachains/adder/collator/src/lib.rs +++ b/parachain/test-parachains/adder/collator/src/lib.rs @@ -272,7 +272,7 @@ mod tests { } fn validate_collation(collator: &Collator, parent_head: HeadData, collation: Collation) { - use polkadot_node_core_pvf_worker::testing::validate_candidate; + use polkadot_node_core_pvf::testing::validate_candidate; let block_data = match collation.proof_of_validity { MaybeCompressedPoV::Raw(pov) => pov.block_data, diff --git a/parachain/test-parachains/halt/build.rs b/parachain/test-parachains/halt/build.rs index aabf553cedd9..cb3370c150d4 100644 --- a/parachain/test-parachains/halt/build.rs +++ b/parachain/test-parachains/halt/build.rs @@ -17,7 +17,11 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new().with_current_project().export_heap_base().build(); + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .disable_runtime_version_section_check() + .build(); enable_alloc_error_handler(); } diff --git a/parachain/test-parachains/undying/build.rs b/parachain/test-parachains/undying/build.rs index d6377f3d5a5a..6bb34ead1b86 100644 --- a/parachain/test-parachains/undying/build.rs +++ b/parachain/test-parachains/undying/build.rs @@ -17,5 +17,9 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new().with_current_project().export_heap_base().build() + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .disable_runtime_version_section_check() + .build() } diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index 1b2ccf3be0ca..2b9d80401f5d 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -34,7 +34,7 @@ sc-service = { git = "https://github.com/paritytech/substrate", branch = "master # This one is tricky. Even though it is not used directly by the collator, we still need it for the # `puppet_worker` binary, which is required for the integration test. However, this shouldn't be # a big problem since it is used transitively anyway. -polkadot-node-core-pvf-worker = { path = "../../../../node/core/pvf/worker" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf" } [dev-dependencies] polkadot-parachain = { path = "../../.." } diff --git a/parachain/test-parachains/undying/collator/bin/puppet_worker.rs b/parachain/test-parachains/undying/collator/bin/puppet_worker.rs index ddd81971292b..7f93519d8454 100644 --- a/parachain/test-parachains/undying/collator/bin/puppet_worker.rs +++ b/parachain/test-parachains/undying/collator/bin/puppet_worker.rs @@ -14,4 +14,4 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-polkadot_node_core_pvf_worker::decl_puppet_worker_main!(); +polkadot_node_core_pvf::decl_puppet_worker_main!(); diff --git a/parachain/test-parachains/undying/collator/src/lib.rs b/parachain/test-parachains/undying/collator/src/lib.rs index dcaf9b63296d..838590fa16f5 100644 --- a/parachain/test-parachains/undying/collator/src/lib.rs +++ b/parachain/test-parachains/undying/collator/src/lib.rs @@ -354,7 +354,7 @@ mod tests { } fn validate_collation(collator: &Collator, parent_head: HeadData, collation: Collation) { - use polkadot_node_core_pvf_worker::testing::validate_candidate; + use polkadot_node_core_pvf::testing::validate_candidate; let block_data = match collation.proof_of_validity { MaybeCompressedPoV::Raw(pov) => pov.block_data, diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 6ce968230724..4274ac9175d7 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -9,7 +9,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.4.0", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["bit-vec", "derive"] } -serde = { version = "1.0.137", optional = true, features = ["derive"] } +serde = { version = "1.0.163", optional = true, features = ["derive"] } application-crypto = { package = "sp-application-crypto", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index 4e15e1fb9338..67714476560c 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -219,6 +219,24 @@ sp_api::decl_runtime_apis! { /// Returns execution parameters for the session. fn session_executor_params(session_index: SessionIndex) -> Option; + /// Returns a list of validators that lost a past session dispute and need to be slashed. + #[api_version(5)] + fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>; + + /// Returns a merkle proof of a validator session key. + #[api_version(5)] + fn key_ownership_proof( + validator_id: ValidatorId, + ) -> Option; + + /// Submit an unsigned extrinsic to slash validators who lost a dispute about + /// a candidate of a past session. + #[api_version(5)] + fn submit_report_dispute_lost( + dispute_proof: vstaging::slashing::DisputeProof, + key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + ) -> Option<()>; + /***** Asynchronous backing *****/ /// Returns the state of parachain backing for a given para. diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index 48d9f2a784f3..c0910a8ebebd 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -18,6 +18,7 @@ // Put any primitives used by staging APIs functions here pub use crate::v4::*; +pub mod slashing; use sp_std::prelude::*; use parity_scale_codec::{Decode, Encode}; diff --git a/primitives/src/vstaging/slashing.rs b/primitives/src/vstaging/slashing.rs new file mode 100644 index 000000000000..c5782c7c2ab4 --- /dev/null +++ b/primitives/src/vstaging/slashing.rs @@ -0,0 +1,99 @@ +// Copyright 2017-2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Primitives types used for dispute slashing. + +use crate::v4::{CandidateHash, SessionIndex, ValidatorId, ValidatorIndex}; +use parity_scale_codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +/// The kind of the dispute offence. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, TypeInfo, Debug)] +pub enum SlashingOffenceKind { + /// A severe offence when a validator backed an invalid block. + #[codec(index = 0)] + ForInvalid, + /// A minor offence when a validator disputed a valid block. + #[codec(index = 1)] + AgainstValid, +} + +/// Timeslots should uniquely identify offences and are used for the offence +/// deduplication. +#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Encode, Decode, TypeInfo, Debug)] +pub struct DisputesTimeSlot { + // The order of the fields matters for `derive(Ord)`. + /// Session index when the candidate was backed/included. + pub session_index: SessionIndex, + /// Candidate hash of the disputed candidate. + pub candidate_hash: CandidateHash, +} + +impl DisputesTimeSlot { + /// Create a new instance of `Self`. + pub fn new(session_index: SessionIndex, candidate_hash: CandidateHash) -> Self { + Self { session_index, candidate_hash } + } +} + +/// We store most of the information about a lost dispute on chain. This struct +/// is required to identify and verify it. +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug)] +pub struct DisputeProof { + /// Time slot when the dispute occured. + pub time_slot: DisputesTimeSlot, + /// The dispute outcome. + pub kind: SlashingOffenceKind, + /// The index of the validator who lost a dispute. + pub validator_index: ValidatorIndex, + /// The parachain session key of the validator. + pub validator_id: ValidatorId, +} + +/// Slashes that are waiting to be applied once we have validator key +/// identification. +#[derive(Encode, Decode, TypeInfo, Debug, Clone)] +pub struct PendingSlashes { + /// Indices and keys of the validators who lost a dispute and are pending + /// slashes. + pub keys: BTreeMap, + /// The dispute outcome. + pub kind: SlashingOffenceKind, +} + +// TODO: can we reuse this type between BABE, GRANDPA and disputes? +/// An opaque type used to represent the key ownership proof at the runtime API +/// boundary. The inner value is an encoded representation of the actual key +/// ownership proof which will be parameterized when defining the runtime. At +/// the runtime API boundary this type is unknown and as such we keep this +/// opaque representation, implementors of the runtime API will have to make +/// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. +#[derive(Decode, Encode, PartialEq, Eq, Debug, Clone, TypeInfo)] +pub struct OpaqueKeyOwnershipProof(Vec); +impl OpaqueKeyOwnershipProof { + /// Create a new `OpaqueKeyOwnershipProof` using the given encoded + /// representation. 
+ pub fn new(inner: Vec) -> OpaqueKeyOwnershipProof { + OpaqueKeyOwnershipProof(inner) + } + + /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key + /// ownership proof type. + pub fn decode(self) -> Option { + Decode::decode(&mut &self.0[..]).ok() + } +} diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 517b600eeea6..64a19394918b 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -11,7 +11,7 @@ parity-scale-codec = { version = "3.4.0", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.139", default-features = false } +serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index b598d2ea0ef0..fe6b54535396 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -20,6 +20,7 @@ use frame_support::{ ensure, traits::{Currency, Get, IsSubType, VestingSchedule}, weights::Weight, + DefaultNoBound, }; pub use pallet::*; use parity_scale_codec::{Decode, Encode}; @@ -28,10 +29,8 @@ use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; -#[cfg(feature = "std")] -use sp_runtime::traits::Zero; use sp_runtime::{ - traits::{CheckedSub, DispatchInfoOf, SignedExtension}, + traits::{CheckedSub, DispatchInfoOf, SignedExtension, Zero}, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, @@ -229,19 +228,13 @@ pub mod pallet { pub(super) type Preclaims = StorageMap<_, Identity, T::AccountId, EthereumAddress>; #[pallet::genesis_config] + #[derive(DefaultNoBound)] pub struct GenesisConfig { pub claims: Vec<(EthereumAddress, BalanceOf, Option, Option)>, pub vesting: Vec<(EthereumAddress, (BalanceOf, BalanceOf, T::BlockNumber))>, } - #[cfg(feature = "std")] - impl Default for GenesisConfig { - fn default() -> Self { - GenesisConfig { claims: Default::default(), vesting: Default::default() } - } - } - #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 8653e6f19123..729a3d6f90af 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -195,7 +195,6 @@ pub mod pallet { pub next_free_para_id: ParaId, } - #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { next_free_para_id: LOWEST_PUBLIC_ID } diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index acb58ee96513..b317cc9e88da 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -11,7 +11,7 @@ parity-scale-codec = { version = "3.4.0", default-features = false, features = [ scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -serde = { version = "1.0.139", default-features = false } +serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" smallvec = 
"1.8.0" diff --git a/runtime/kusama/src/weights/runtime_parachains_configuration.rs b/runtime/kusama/src/weights/runtime_parachains_configuration.rs index 109cbcce1a8d..8fa5b6e33393 100644 --- a/runtime/kusama/src/weights/runtime_parachains_configuration.rs +++ b/runtime/kusama/src/weights/runtime_parachains_configuration.rs @@ -17,24 +17,26 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-05-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm6`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=kusama-dev // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::configuration // --extrinsic=* // --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=kusama-dev // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,11 +57,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 9_482_000 picoseconds. - Weight::from_parts(9_845_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_471_000 picoseconds. + Weight::from_parts(9_872_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -71,11 +73,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 9_689_000 picoseconds. - Weight::from_parts(9_960_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_555_000 picoseconds. + Weight::from_parts(9_800_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -87,27 +89,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 9_523_000 picoseconds. 
- Weight::from_parts(9_736_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - fn set_config_with_weight() -> Weight { - // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 9_443_000 picoseconds. - Weight::from_parts(9_679_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_623_000 picoseconds. + Weight::from_parts(9_832_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -129,11 +115,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 9_460_000 picoseconds. - Weight::from_parts(9_716_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_482_000 picoseconds. + Weight::from_parts(9_797_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -145,11 +131,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 10_279_000 picoseconds. - Weight::from_parts(10_615_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 10_753_000 picoseconds. + Weight::from_parts(11_009_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index e1b5b5b1de59..31e9bdfc89dc 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -10,7 +10,7 @@ parity-scale-codec = { version = "3.4.0", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.139", features = [ "derive" ], optional = true } +serde = { version = "1.0.163", features = [ "derive" ], optional = true } derive_more = "0.99.17" bitflags = "1.3.2" diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 62efc095334d..ff41acddbf7f 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -19,7 +19,7 @@ //! 
Configuration can change only at session boundaries and is buffered until then. use crate::{inclusion::MAX_UPWARD_MESSAGE_SIZE_BOUND, shared}; -use frame_support::pallet_prelude::*; +use frame_support::{pallet_prelude::*, DefaultNoBound}; use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::{MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM}; @@ -440,7 +440,6 @@ pub trait WeightInfo { fn set_config_with_block_number() -> Weight; fn set_config_with_u32() -> Weight; fn set_config_with_option_u32() -> Weight; - fn set_config_with_weight() -> Weight; fn set_config_with_balance() -> Weight; fn set_hrmp_open_request_ttl() -> Weight; fn set_config_with_executor_params() -> Weight; @@ -457,9 +456,6 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_option_u32() -> Weight { Weight::MAX } - fn set_config_with_weight() -> Weight { - Weight::MAX - } fn set_config_with_balance() -> Weight { Weight::MAX } @@ -527,17 +523,11 @@ pub mod pallet { pub(crate) type BypassConsistencyCheck = StorageValue<_, bool, ValueQuery>; #[pallet::genesis_config] + #[derive(DefaultNoBound)] pub struct GenesisConfig { pub config: HostConfiguration, } - #[cfg(feature = "std")] - impl Default for GenesisConfig { - fn default() -> Self { - GenesisConfig { config: Default::default() } - } - } - #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { diff --git a/runtime/parachains/src/disputes/slashing.rs b/runtime/parachains/src/disputes/slashing.rs index 58b452d6db07..daf10814df0f 100644 --- a/runtime/parachains/src/disputes/slashing.rs +++ b/runtime/parachains/src/disputes/slashing.rs @@ -49,8 +49,10 @@ use frame_support::{ weights::Weight, }; -use parity_scale_codec::{Decode, Encode}; -use primitives::{CandidateHash, SessionIndex, ValidatorId, ValidatorIndex}; +use primitives::{ + vstaging::slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, + CandidateHash, SessionIndex, ValidatorId, ValidatorIndex, +}; use scale_info::TypeInfo; use sp_runtime::{ traits::Convert, @@ -58,15 +60,12 @@ use sp_runtime::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, }, - KeyTypeId, Perbill, RuntimeDebug, + KeyTypeId, Perbill, }; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::offence::{DisableStrategy, Kind, Offence, OffenceError, ReportOffence}; use sp_std::{ - collections::{ - btree_map::{BTreeMap, Entry}, - btree_set::BTreeSet, - }, + collections::{btree_map::Entry, btree_set::BTreeSet}, prelude::*, }; @@ -92,23 +91,8 @@ impl BenchmarkingConfiguration for BenchConfig { const MAX_VALIDATORS: u32 = M; } -/// Timeslots should uniquely identify offences and are used for the offence -/// deduplication. -#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -pub struct DisputesTimeSlot { - // The order of these matters for `derive(Ord)`. - session_index: SessionIndex, - candidate_hash: CandidateHash, -} - -impl DisputesTimeSlot { - pub fn new(session_index: SessionIndex, candidate_hash: CandidateHash) -> Self { - Self { session_index, candidate_hash } - } -} - /// An offence that is filed when a series of validators lost a dispute. -#[derive(RuntimeDebug, TypeInfo)] +#[derive(TypeInfo)] #[cfg_attr(feature = "std", derive(Clone, PartialEq, Eq))] pub struct SlashingOffence { /// The size of the validator set in that session. 
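The hunk above switches the slashing pallet over to the shared `DisputeProof`, `PendingSlashes` and `OpaqueKeyOwnershipProof` types that this patch introduces in `primitives/src/vstaging/slashing.rs`. A minimal sketch of how the opaque proof wrapper round-trips, not part of the patch: it assumes the generics stripped by the diff rendering read `OpaqueKeyOwnershipProof(Vec<u8>)` and `fn decode<T: Decode>(self) -> Option<T>`, and it uses `sp_session::MembershipProof` because that is what `Historical::prove` hands to `OpaqueKeyOwnershipProof::new` in the Rococo and Westend hunks further down:

    use parity_scale_codec::Encode;
    use polkadot_primitives::vstaging::slashing::OpaqueKeyOwnershipProof; // crate path assumed
    use sp_session::MembershipProof;

    fn round_trip(proof: MembershipProof) -> Option<MembershipProof> {
        // The opaque wrapper is just the SCALE encoding of the concrete proof ...
        let opaque = OpaqueKeyOwnershipProof::new(proof.encode());
        // ... which the runtime later recovers with `decode()` before reporting
        // the offence via `submit_unsigned_slashing_report`.
        opaque.decode()
    }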
@@ -323,39 +307,6 @@ where } } -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum SlashingOffenceKind { - #[codec(index = 0)] - ForInvalid, - #[codec(index = 1)] - AgainstValid, -} - -/// We store most of the information about a lost dispute on chain. This struct -/// is required to identify and verify it. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct DisputeProof { - /// Time slot when the dispute occured. - pub time_slot: DisputesTimeSlot, - /// The dispute outcome. - pub kind: SlashingOffenceKind, - /// The index of the validator who lost a dispute. - pub validator_index: ValidatorIndex, - /// The parachain session key of the validator. - pub validator_id: ValidatorId, -} - -/// Slashes that are waiting to be applied once we have validator key -/// identification. -#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct PendingSlashes { - /// Indices and keys of the validators who lost a dispute and are pending - /// slashes. - pub keys: BTreeMap, - /// The dispute outcome. - pub kind: SlashingOffenceKind, -} - /// A trait that defines methods to report an offence (after the slashing report /// has been validated) and for submitting a transaction to report a slash (from /// an offchain context). @@ -603,6 +554,17 @@ impl Pallet { let old_session = session_index - config.dispute_period - 1; let _ = >::clear_prefix(old_session, REMOVE_LIMIT, None); } + + pub(crate) fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, PendingSlashes)> { + >::iter().collect() + } + + pub(crate) fn submit_unsigned_slashing_report( + dispute_proof: DisputeProof, + key_ownership_proof: ::KeyOwnerProof, + ) -> Option<()> { + T::HandleReports::submit_unsigned_slashing_report(dispute_proof, key_ownership_proof).ok() + } } /// Methods for the `ValidateUnsigned` implementation: diff --git a/runtime/parachains/src/disputes/slashing/benchmarking.rs b/runtime/parachains/src/disputes/slashing/benchmarking.rs index d7f2eeed1ac4..4debc41d3306 100644 --- a/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -21,6 +21,7 @@ use frame_benchmarking::{benchmarks, whitelist_account}; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::RawOrigin; use pallet_staking::testing_utils::create_validators; +use parity_scale_codec::Decode; use primitives::{Hash, PARACHAIN_KEY_TYPE_ID}; use sp_runtime::traits::{One, StaticLookup}; use sp_session::MembershipProof; diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 12ef3a6384e9..77b20ce171fd 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -18,7 +18,7 @@ use crate::{ configuration::{self, HostConfiguration}, dmp, ensure_parachain, initializer, paras, }; -use frame_support::{pallet_prelude::*, traits::ReservableCurrency}; +use frame_support::{pallet_prelude::*, traits::ReservableCurrency, DefaultNoBound}; use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::HorizontalMessages; @@ -435,17 +435,11 @@ pub mod pallet { /// configuration pallet. /// 2. `sender` and `recipient` must be valid paras. 
#[pallet::genesis_config] + #[derive(DefaultNoBound)] pub struct GenesisConfig { preopen_hrmp_channels: Vec<(ParaId, ParaId, u32, u32)>, } - #[cfg(feature = "std")] - impl Default for GenesisConfig { - fn default() -> Self { - GenesisConfig { preopen_hrmp_channels: Default::default() } - } - } - #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { @@ -621,7 +615,6 @@ pub mod pallet { } } -#[cfg(feature = "std")] fn initialize_storage(preopen_hrmp_channels: &[(ParaId, ParaId, u32, u32)]) { let host_config = configuration::Pallet::::config(); for &(sender, recipient, max_capacity, max_message_size) in preopen_hrmp_channels { @@ -634,7 +627,6 @@ fn initialize_storage(preopen_hrmp_channels: &[(ParaId, ParaId, u32, >::process_hrmp_open_channel_requests(&host_config); } -#[cfg(feature = "std")] fn preopen_hrmp_channel( sender: ParaId, recipient: ParaId, diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 26d94f4a908c..309f15537eb9 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -115,7 +115,7 @@ use crate::{ shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use frame_support::{pallet_prelude::*, traits::EstimateNextSessionRotation}; +use frame_support::{pallet_prelude::*, traits::EstimateNextSessionRotation, DefaultNoBound}; use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use primitives::{ @@ -784,17 +784,11 @@ pub mod pallet { StorageMap<_, Identity, ValidationCodeHash, ValidationCode>; #[pallet::genesis_config] + #[derive(DefaultNoBound)] pub struct GenesisConfig { pub paras: Vec<(ParaId, ParaGenesisArgs)>, } - #[cfg(feature = "std")] - impl Default for GenesisConfig { - fn default() -> Self { - GenesisConfig { paras: Default::default() } - } - } - #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 8548f4dff5c2..3e9e6c8f02c7 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,16 +16,41 @@ //! Put implementations of functions from staging APIs here. 
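The genesis-config hunks above (`configuration`, `hrmp`, `paras`, plus `claims` earlier in the patch) all replace a hand-written, `std`-gated `impl Default for GenesisConfig` with `#[derive(DefaultNoBound)]`. A minimal sketch of the pattern, outside the patch and with made-up names: a plain `#[derive(Default)]` would force a `T: Default` bound on the impl, while `DefaultNoBound` only requires the fields themselves to be defaultable:

    use core::marker::PhantomData;
    use frame_support::DefaultNoBound;

    pub trait Config {
        type AccountId; // note: no `Default` bound anywhere
    }

    #[derive(DefaultNoBound)]
    pub struct GenesisConfig<T: Config> {
        pub preopen_channels: Vec<(u32, u32)>,
        pub _phantom: PhantomData<T>,
    }

    fn default_genesis<T: Config>() -> GenesisConfig<T> {
        // Compiles even though `T` implements no `Default`.
        GenesisConfig::<T>::default()
    }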
-use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; +use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared, disputes}; use primitives::{ vstaging::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, - Id as ParaId, + Id as ParaId, vstaging, CandidateHash, DisputeState, SessionIndex, }; use sp_std::prelude::*; +/// Implementation for `get_session_disputes` function from the runtime API +pub fn get_session_disputes( +) -> Vec<(SessionIndex, CandidateHash, DisputeState)> { + >::disputes() +} + +/// Implementation of `unapplied_slashes` runtime API +pub fn unapplied_slashes( +) -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)> { + >::unapplied_slashes() +} + +/// Implementation of `submit_report_dispute_lost` runtime API +pub fn submit_unsigned_slashing_report( + dispute_proof: vstaging::slashing::DisputeProof, + key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, +) -> Option<()> { + let key_ownership_proof = key_ownership_proof.decode()?; + + >::submit_unsigned_slashing_report( + dispute_proof, + key_ownership_proof, + ) +} + /// Implementation for `StagingParaBackingState` function from the runtime API pub fn backing_state( para_id: ParaId, diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index 5e59c22c984d..e287ab3646ee 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -11,7 +11,7 @@ parity-scale-codec = { version = "3.4.0", default-features = false, features = [ scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -serde = { version = "1.0.139", default-features = false } +serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" smallvec = "1.8.0" diff --git a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs index f1ae7bbad4e0..292b8916dad0 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs @@ -101,24 +101,7 @@ impl runtime_parachains::configuration::WeightInfo for .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - fn set_config_with_weight() -> Weight { - // Proof Size summary in bytes: - // Measured: `393` - // Estimated: `1878` - // Minimum execution time: 12_999_000 picoseconds. 
- Weight::from_parts(13_465_000, 0) - .saturating_add(Weight::from_parts(0, 1878)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } + /// Storage: Benchmark Override (r:0 w:0) /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) fn set_hrmp_open_request_ttl() -> Weight { diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index a6d00060d939..3b39756e3b40 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -9,7 +9,7 @@ edition.workspace = true parity-scale-codec = { version = "3.4.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.139", default-features = false } +serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" smallvec = "1.8.0" diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 03567318e226..77060d96e0ab 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -23,11 +23,11 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, + vstaging, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, impls::ToAuthor, @@ -1909,6 +1909,31 @@ sp_api::impl_runtime_apis! 
{ parachains_runtime_api_impl::get_session_disputes::() } + fn unapplied_slashes( + ) -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)> { + runtime_parachains::runtime_api_impl::vstaging::unapplied_slashes::() + } + + fn key_ownership_proof( + validator_id: ValidatorId, + ) -> Option { + use parity_scale_codec::Encode; + + Historical::prove((PARACHAIN_KEY_TYPE_ID, validator_id)) + .map(|p| p.encode()) + .map(vstaging::slashing::OpaqueKeyOwnershipProof::new) + } + + fn submit_report_dispute_lost( + dispute_proof: vstaging::slashing::DisputeProof, + key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + ) -> Option<()> { + runtime_parachains::runtime_api_impl::vstaging::submit_unsigned_slashing_report::( + dispute_proof, + key_ownership_proof, + ) + } + fn staging_para_backing_state(para_id: ParaId) -> Option { runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) } diff --git a/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/runtime/rococo/src/weights/runtime_parachains_configuration.rs index fcc8ed4840c8..11effbc4e63e 100644 --- a/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -101,24 +101,7 @@ impl runtime_parachains::configuration::WeightInfo for .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - fn set_config_with_weight() -> Weight { - // Proof Size summary in bytes: - // Measured: `397` - // Estimated: `1882` - // Minimum execution time: 12_914_000 picoseconds. 
- Weight::from_parts(13_395_000, 0) - .saturating_add(Weight::from_parts(0, 1882)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } + /// Storage: Benchmark Override (r:0 w:0) /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) fn set_hrmp_open_request_ttl() -> Weight { diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index b5b2d2289437..73bd3ab3688d 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -11,7 +11,7 @@ parity-scale-codec = { version = "3.4.0", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.139", default-features = false } +serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 767f8bda68d7..a361a2a6ec5c 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -546,6 +546,7 @@ pub mod pallet_test_notifier { use pallet_xcm::ensure_response; use sp_runtime::DispatchResult; use xcm::latest::prelude::*; + use xcm_executor::traits::QueryHandler as XcmQueryHandler; #[pallet::pallet] pub struct Pallet(_); @@ -581,7 +582,7 @@ pub mod pallet_test_notifier { let id = who .using_encoded(|mut d| <[u8; 32]>::decode(&mut d)) .map_err(|_| Error::::BadAccountFormat)?; - let qid = pallet_xcm::Pallet::::new_query( + let qid = as XcmQueryHandler>::new_query( Junction::AccountId32 { network: None, id }, 100u32.into(), Here, diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index 08a8ca23a55a..bcc406a7c2a2 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -11,7 +11,7 @@ parity-scale-codec = { version = "3.4.0", default-features = false, features = [ scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -serde = { version = "1.0.139", default-features = false } +serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 9101fd5a8e38..0cf7d2a1737d 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -39,12 +39,12 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, + vstaging, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, 
impl_runtime_weights, @@ -58,7 +58,9 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v4 as parachains_runtime_api_impl, + runtime_api_impl::{ + v4 as parachains_runtime_api_impl, vstaging as parachains_runtime_api_impl_staging, + }, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -1581,6 +1583,31 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::get_session_disputes::() } + fn unapplied_slashes( + ) -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)> { + parachains_runtime_api_impl_staging::unapplied_slashes::() + } + + fn key_ownership_proof( + validator_id: ValidatorId, + ) -> Option { + use parity_scale_codec::Encode; + + Historical::prove((PARACHAIN_KEY_TYPE_ID, validator_id)) + .map(|p| p.encode()) + .map(vstaging::slashing::OpaqueKeyOwnershipProof::new) + } + + fn submit_report_dispute_lost( + dispute_proof: vstaging::slashing::DisputeProof, + key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + ) -> Option<()> { + parachains_runtime_api_impl_staging::submit_unsigned_slashing_report::( + dispute_proof, + key_ownership_proof, + ) + } + fn staging_para_backing_state(para_id: ParaId) -> Option { runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) } diff --git a/runtime/westend/src/weights/runtime_parachains_configuration.rs b/runtime/westend/src/weights/runtime_parachains_configuration.rs index aaa7536bdd3d..ec769bb2fa15 100644 --- a/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -95,22 +95,7 @@ impl runtime_parachains::configuration::WeightInfo for .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - fn set_config_with_weight() -> Weight { - // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `1575` - // Minimum execution time: 10_293_000 picoseconds. 
- Weight::from_parts(10_619_000, 0) - .saturating_add(Weight::from_parts(0, 1575)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } + /// Storage: Benchmark Override (r:0 w:0) /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) fn set_hrmp_open_request_ttl() -> Weight { diff --git a/utils/staking-miner/Cargo.toml b/utils/staking-miner/Cargo.toml index 081b4648416f..1f2d0c1c8acf 100644 --- a/utils/staking-miner/Cargo.toml +++ b/utils/staking-miner/Cargo.toml @@ -15,7 +15,7 @@ tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } jsonrpsee = { version = "0.16.2", features = ["ws-client", "macros"] } log = "0.4.17" paste = "1.0.7" -serde = "1.0.137" +serde = "1.0.163" serde_json = "1.0" thiserror = "1.0.31" tokio = { version = "1.24.2", features = ["macros", "rt-multi-thread", "sync"] } diff --git a/xcm/Cargo.toml b/xcm/Cargo.toml index e2a123a16680..ce3ec331b151 100644 --- a/xcm/Cargo.toml +++ b/xcm/Cargo.toml @@ -6,14 +6,14 @@ authors.workspace = true edition.workspace = true [dependencies] -bounded-collections = { version = "0.1.5", default-features = false } +bounded-collections = { version = "0.1.7", default-features = false } derivative = { version = "2.2.0", default-features = false, features = [ "use_core" ] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } parity-scale-codec = { version = "3.4.0", default-features = false, features = [ "derive", "max-encoded-len" ] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-weights = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -serde = { version = "1.0.136", optional = true, features = ["derive"] } +serde = { version = "1.0.163", optional = true, features = ["derive"] } xcm-procedural = { path = "procedural" } [dev-dependencies] diff --git a/xcm/pallet-xcm/Cargo.toml b/xcm/pallet-xcm/Cargo.toml index dd58b0bf903a..9ebd11228b21 100644 --- a/xcm/pallet-xcm/Cargo.toml +++ b/xcm/pallet-xcm/Cargo.toml @@ -6,10 +6,10 @@ version.workspace = true [dependencies] -bounded-collections = { version = "0.1.5", default-features = false } +bounded-collections = { version = "0.1.7", default-features = false } codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.137", optional = true, features = ["derive"] } +serde = { version = "1.0.163", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } diff --git a/xcm/pallet-xcm/src/lib.rs b/xcm/pallet-xcm/src/lib.rs index 041e76b2f69f..61243c7d682b 100644 --- a/xcm/pallet-xcm/src/lib.rs +++ b/xcm/pallet-xcm/src/lib.rs @@ -52,8 +52,8 @@ use frame_system::pallet_prelude::*; pub use pallet::*; use xcm_executor::{ traits::{ - CheckSuspension, ClaimAssets, DropAssets, MatchesFungible, OnResponse, - VersionChangeNotifier, WeightBounds, + CheckSuspension, ClaimAssets, DropAssets, MatchesFungible, OnResponse, QueryHandler, + QueryResponseStatus, VersionChangeNotifier, WeightBounds, }, Assets, }; @@ -638,7 +638,6 @@ pub mod pallet { pub safe_xcm_version: Option, } - #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { Self { 
safe_xcm_version: Some(XCM_VERSION) } @@ -1127,6 +1126,66 @@ pub mod pallet { /// The maximum number of distinct assets allowed to be transferred in a single helper extrinsic. const MAX_ASSETS_FOR_TRANSFER: usize = 2; +impl QueryHandler for Pallet { + type QueryId = u64; + type BlockNumber = T::BlockNumber; + type Error = XcmError; + type UniversalLocation = T::UniversalLocation; + + /// Attempt to create a new query ID and register it as a query that is yet to respond. + fn new_query( + responder: impl Into, + timeout: T::BlockNumber, + match_querier: impl Into, + ) -> Self::QueryId { + Self::do_new_query(responder, None, timeout, match_querier).into() + } + + /// To check the status of the query, use `fn query()` passing the resultant `QueryId` + /// value. + fn report_outcome( + message: &mut Xcm<()>, + responder: impl Into, + timeout: Self::BlockNumber, + ) -> Result { + let responder = responder.into(); + let destination = Self::UniversalLocation::get() + .invert_target(&responder) + .map_err(|()| XcmError::LocationNotInvertible)?; + let query_id = Self::new_query(responder, timeout, Here); + let response_info = QueryResponseInfo { destination, query_id, max_weight: Weight::zero() }; + let report_error = Xcm(vec![ReportError(response_info)]); + message.0.insert(0, SetAppendix(report_error)); + Ok(query_id) + } + + /// Removes response when ready and emits [Event::ResponseTaken] event. + fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + match Queries::::get(query_id) { + Some(QueryStatus::Ready { response, at }) => match response.try_into() { + Ok(response) => { + Queries::::remove(query_id); + Self::deposit_event(Event::ResponseTaken { query_id }); + QueryResponseStatus::Ready { response, at } + }, + Err(_) => QueryResponseStatus::UnexpectedVersion, + }, + Some(QueryStatus::Pending { timeout, .. }) => QueryResponseStatus::Pending { timeout }, + Some(_) => QueryResponseStatus::UnexpectedVersion, + None => QueryResponseStatus::NotFound, + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn expect_response(id: Self::QueryId, response: Response) { + let response = response.into(); + Queries::::insert( + id, + QueryStatus::Ready { response, at: frame_system::Pallet::::block_number() }, + ); + } +} + impl Pallet { fn do_reserve_transfer_assets( origin: OriginFor, @@ -1498,36 +1557,6 @@ impl Pallet { }) } - /// Consume `message` and return another which is equivalent to it except that it reports - /// back the outcome. - /// - /// - `message`: The message whose outcome should be reported. - /// - `responder`: The origin from which a response should be expected. - /// - `timeout`: The block number after which it is permissible for `notify` not to be - /// called even if a response is received. - /// - /// `report_outcome` may return an error if the `responder` is not invertible. - /// - /// It is assumed that the querier of the response will be `Here`. - /// - /// To check the status of the query, use `fn query()` passing the resultant `QueryId` - /// value. 
- pub fn report_outcome( - message: &mut Xcm<()>, - responder: impl Into, - timeout: T::BlockNumber, - ) -> Result { - let responder = responder.into(); - let destination = T::UniversalLocation::get() - .invert_target(&responder) - .map_err(|()| XcmError::LocationNotInvertible)?; - let query_id = Self::new_query(responder, timeout, Here); - let response_info = QueryResponseInfo { destination, query_id, max_weight: Weight::zero() }; - let report_error = Xcm(vec![ReportError(response_info)]); - message.0.insert(0, SetAppendix(report_error)); - Ok(query_id) - } - /// Consume `message` and return another which is equivalent to it except that it reports /// back the outcome and dispatches `notify` on this chain. /// @@ -1569,15 +1598,6 @@ impl Pallet { Ok(()) } - /// Attempt to create a new query ID and register it as a query that is yet to respond. - pub fn new_query( - responder: impl Into, - timeout: T::BlockNumber, - match_querier: impl Into, - ) -> u64 { - Self::do_new_query(responder, None, timeout, match_querier) - } - /// Attempt to create a new query ID and register it as a query that is yet to respond, and /// which will call a dispatchable when a response happens. pub fn new_notify_query( @@ -1592,20 +1612,6 @@ impl Pallet { Self::do_new_query(responder, Some(notify), timeout, match_querier) } - /// Attempt to remove and return the response of query with ID `query_id`. - /// - /// Returns `None` if the response is not (yet) available. - pub fn take_response(query_id: QueryId) -> Option<(Response, T::BlockNumber)> { - if let Some(QueryStatus::Ready { response, at }) = Queries::::get(query_id) { - let response = response.try_into().ok()?; - Queries::::remove(query_id); - Self::deposit_event(Event::ResponseTaken { query_id }); - Some((response, at)) - } else { - None - } - } - /// Note that a particular destination to whom we would like to send a message is unknown /// and queue it for version discovery. 
fn note_unknown_version(dest: &MultiLocation) { diff --git a/xcm/pallet-xcm/src/mock.rs b/xcm/pallet-xcm/src/mock.rs index de3f8d4957a3..6cfc1447e2b4 100644 --- a/xcm/pallet-xcm/src/mock.rs +++ b/xcm/pallet-xcm/src/mock.rs @@ -50,6 +50,7 @@ pub mod pallet_test_notifier { use frame_system::pallet_prelude::*; use sp_runtime::DispatchResult; use xcm::latest::prelude::*; + use xcm_executor::traits::QueryHandler; #[pallet::pallet] pub struct Pallet(_); @@ -85,7 +86,7 @@ pub mod pallet_test_notifier { let id = who .using_encoded(|mut d| <[u8; 32]>::decode(&mut d)) .map_err(|_| Error::::BadAccountFormat)?; - let qid = crate::Pallet::::new_query( + let qid = as QueryHandler>::new_query( Junction::AccountId32 { network: None, id }, 100u32.into(), querier, diff --git a/xcm/pallet-xcm/src/tests.rs b/xcm/pallet-xcm/src/tests.rs index 6415fe03d895..2ad13dced936 100644 --- a/xcm/pallet-xcm/src/tests.rs +++ b/xcm/pallet-xcm/src/tests.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::{AccountIdConversion, BlakeTwo256, Hash}; use xcm::{latest::QueryResponseInfo, prelude::*}; use xcm_builder::AllowKnownQueryResponses; use xcm_executor::{ - traits::{Properties, ShouldExecute}, + traits::{Properties, QueryHandler, QueryResponseStatus, ShouldExecute}, XcmExecutor, }; @@ -170,7 +170,8 @@ fn report_outcome_works() { }) ); - let response = Some((Response::ExecutionResult(None), 1)); + let response = + QueryResponseStatus::Ready { response: Response::ExecutionResult(None), at: 1 }; assert_eq!(XcmPallet::take_response(0), response); }); } @@ -270,7 +271,8 @@ fn custom_querier_works() { }) ); - let response = Some((Response::ExecutionResult(None), 1)); + let response = + QueryResponseStatus::Ready { response: Response::ExecutionResult(None), at: 1 }; assert_eq!(XcmPallet::take_response(0), response); }); } diff --git a/xcm/procedural/Cargo.toml b/xcm/procedural/Cargo.toml index 4da7ac1e2223..dc56695043ca 100644 --- a/xcm/procedural/Cargo.toml +++ b/xcm/procedural/Cargo.toml @@ -9,6 +9,6 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = "1.0.26" +quote = "1.0.28" syn = "2.0.15" Inflector = "0.11.4" diff --git a/xcm/xcm-builder/Cargo.toml b/xcm/xcm-builder/Cargo.toml index 6f4a4f9dde14..465d338fd0a7 100644 --- a/xcm/xcm-builder/Cargo.toml +++ b/xcm/xcm-builder/Cargo.toml @@ -36,7 +36,8 @@ polkadot-test-runtime = { path = "../../runtime/test-runtime" } default = ["std"] runtime-benchmarks = [ "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks" + "frame-system/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", ] std = [ "log/std", diff --git a/xcm/xcm-builder/src/lib.rs b/xcm/xcm-builder/src/lib.rs index 9ff37209c015..124e83d3c338 100644 --- a/xcm/xcm-builder/src/lib.rs +++ b/xcm/xcm-builder/src/lib.rs @@ -28,8 +28,9 @@ pub mod test_utils; mod location_conversion; pub use location_conversion::{ - Account32Hash, AccountId32Aliases, AccountKey20Aliases, ChildParachainConvertsVia, - GlobalConsensusParachainConvertsFor, ParentIsPreset, SiblingParachainConvertsVia, + Account32Hash, AccountId32Aliases, AccountKey20Aliases, AliasesIntoAccountId32, + ChildParachainConvertsVia, GlobalConsensusParachainConvertsFor, ParentIsPreset, + SiblingParachainConvertsVia, }; mod origin_conversion; @@ -95,3 +96,6 @@ pub use universal_exports::{ ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, NetworkExportTable, SovereignPaidRemoteExporter, UnpaidLocalExporter, UnpaidRemoteExporter, }; + +mod pay; +pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, PayOverXcm}; diff 
--git a/xcm/xcm-builder/src/location_conversion.rs b/xcm/xcm-builder/src/location_conversion.rs index 66c9fc6dd31d..bafec5da4a30 100644 --- a/xcm/xcm-builder/src/location_conversion.rs +++ b/xcm/xcm-builder/src/location_conversion.rs @@ -232,6 +232,26 @@ impl<Network: Get<Option<NetworkId>>, AccountId: From<[u8; 32]> + Into<[u8; 32]> } } +/// Conversion implementation which converts from a `[u8; 32]`-based `AccountId` into a +/// `MultiLocation` consisting solely of an `AccountId32` junction with a fixed value for its +/// network (provided by `Network`) and the `AccountId`'s `[u8; 32]` datum for the `id`. +pub struct AliasesIntoAccountId32<Network, AccountId>(PhantomData<(Network, AccountId)>); +impl<'a, Network: Get<Option<NetworkId>>, AccountId: Clone + Into<[u8; 32]> + Clone> + Convert<&'a AccountId, MultiLocation> for AliasesIntoAccountId32<Network, AccountId> +{ + fn convert(who: &AccountId) -> Result { + Ok(AccountId32 { network: Network::get(), id: who.clone().into() }.into()) + } +} + +impl<Network: Get<Option<NetworkId>>, AccountId: Into<[u8; 32]> + Clone> + Convert<AccountId, MultiLocation> for AliasesIntoAccountId32<Network, AccountId> +{ + fn convert(who: AccountId) -> Result { + Ok(AccountId32 { network: Network::get(), id: who.into() }.into()) + } +} + pub struct AccountKey20Aliases<Network, AccountId>(PhantomData<(Network, AccountId)>); impl<Network: Get<Option<NetworkId>>, AccountId: From<[u8; 20]> + Into<[u8; 20]> + Clone> Convert<MultiLocation, AccountId> for AccountKey20Aliases<Network, AccountId> diff --git a/xcm/xcm-builder/src/pay.rs b/xcm/xcm-builder/src/pay.rs new file mode 100644 index 000000000000..e8cd2b2bb287 --- /dev/null +++ b/xcm/xcm-builder/src/pay.rs @@ -0,0 +1,205 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! `PayOverXcm` struct for paying through XCM and getting the status back. + +use frame_support::traits::{ + tokens::{Pay, PaymentStatus}, + Get, +}; +use sp_runtime::traits::Convert; +use sp_std::{marker::PhantomData, vec}; +use xcm::{opaque::lts::Weight, prelude::*}; +use xcm_executor::traits::{QueryHandler, QueryResponseStatus}; + +/// Implementation of the `frame_support::traits::tokens::Pay` trait, to allow +/// for XCM-based payments of a given `Balance` of some asset ID existing on some chain under +/// ownership of some `Interior` location of the local chain to a particular `Beneficiary`. The +/// `AssetKind` value can be converted into both the XCM `AssetId` (via an `Into` bound) and the +/// destination chain's location, via the `AssetKindToLocatableAsset` type parameter. +/// +/// This relies on the XCM `TransferAsset` instruction. A `BeneficiaryRefToLocation` converter must be +/// provided in order to convert the `Beneficiary` reference into a location usable by +/// `TransferAsset`. +/// +/// `PayOverXcm::pay` is asynchronous, and returns a `QueryId` which can then be used in +/// `check_payment` to check the status of the XCM transaction. +/// +/// See also `PayAccountId32OnChainOverXcm` which is similar to this except that `BeneficiaryRefToLocation` +/// need not be supplied and `Beneficiary` must implement `Into<[u8; 32]>`.
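
(Illustrative sketch, not part of the changeset: the asynchronous `Pay` flow described in the doc comment above for the `PayOverXcm` adapter defined next. Only the `Pay` and `PaymentStatus` items from `frame_support` that this file itself uses are assumed; `pay_and_poll` and its parameters are hypothetical names.)

    use frame_support::traits::tokens::{Pay, PaymentStatus};

    /// Fire off a payment through any `Pay` implementation (such as `PayOverXcm`) and poll it
    /// later. Dispatching and polling would normally happen in different blocks, since the XCM
    /// response has to travel back from the remote chain.
    fn pay_and_poll<P: Pay>(
        who: &P::Beneficiary,
        asset_kind: P::AssetKind,
        amount: P::Balance,
    ) -> Result<(), P::Error> {
        // `pay` sends the `TransferAsset` program and registers a query for the
        // `ReportError` appendix; the returned `Id` identifies that query.
        let id = P::pay(who, asset_kind, amount)?;

        // Later: map the query status onto the payment status.
        match P::check_payment(id) {
            PaymentStatus::Success => { /* payout confirmed by the remote chain */ },
            PaymentStatus::InProgress => { /* response not back yet; poll again later */ },
            PaymentStatus::Failure => { /* remote execution reported an error */ },
            PaymentStatus::Unknown => { /* query expired or the response was unrecognisable */ },
        }
        Ok(())
    }
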
+pub struct PayOverXcm< + Interior, + Router, + Querier, + Timeout, + Beneficiary, + AssetKind, + AssetKindToLocatableAsset, + BeneficiaryRefToLocation, +>( + PhantomData<( + Interior, + Router, + Querier, + Timeout, + Beneficiary, + AssetKind, + AssetKindToLocatableAsset, + BeneficiaryRefToLocation, + )>, +); +impl< + Interior: Get, + Router: SendXcm, + Querier: QueryHandler, + Timeout: Get, + Beneficiary: Clone, + AssetKind, + AssetKindToLocatableAsset: Convert, + BeneficiaryRefToLocation: for<'a> Convert<&'a Beneficiary, MultiLocation>, + > Pay + for PayOverXcm< + Interior, + Router, + Querier, + Timeout, + Beneficiary, + AssetKind, + AssetKindToLocatableAsset, + BeneficiaryRefToLocation, + > +{ + type Beneficiary = Beneficiary; + type AssetKind = AssetKind; + type Balance = u128; + type Id = Querier::QueryId; + type Error = xcm::latest::Error; + + fn pay( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) -> Result { + let locatable = AssetKindToLocatableAsset::convert(asset_kind); + let LocatableAssetId { asset_id, location: asset_location } = locatable; + let destination = Querier::UniversalLocation::get() + .invert_target(&asset_location) + .map_err(|()| Self::Error::LocationNotInvertible)?; + let beneficiary = BeneficiaryRefToLocation::convert(&who); + + let query_id = Querier::new_query(asset_location, Timeout::get(), Interior::get()); + + let message = Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + SetAppendix(Xcm(vec![ReportError(QueryResponseInfo { + destination, + query_id, + max_weight: Weight::zero(), + })])), + DescendOrigin(Interior::get()), + TransferAsset { + beneficiary, + assets: vec![MultiAsset { id: asset_id, fun: Fungibility::Fungible(amount) }] + .into(), + }, + ]); + + let (ticket, _) = Router::validate(&mut Some(asset_location), &mut Some(message))?; + Router::deliver(ticket)?; + Ok(query_id.into()) + } + + fn check_payment(id: Self::Id) -> PaymentStatus { + use QueryResponseStatus::*; + match Querier::take_response(id) { + Ready { response, .. } => match response { + Response::ExecutionResult(None) => PaymentStatus::Success, + Response::ExecutionResult(Some(_)) => PaymentStatus::Failure, + _ => PaymentStatus::Unknown, + }, + Pending { .. } => PaymentStatus::InProgress, + NotFound | UnexpectedVersion => PaymentStatus::Unknown, + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, _: Self::Balance) { + // We cannot generally guarantee this will go through successfully since we don't have any + // control over the XCM transport layers. We just assume that the benchmark environment + // will be sending it somewhere sensible. + } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_concluded(id: Self::Id) { + Querier::expect_response(id, Response::ExecutionResult(None)); + } +} + +/// Specialization of the [PayOverXcm] trait to allow `[u8; 32]`-based `AccountId` values to be +/// paid on a remote chain. +/// +/// Implementation of the [frame_support::traits::tokens::Pay] trait, to allow +/// for XCM payments of a given `Balance` of `AssetKind` existing on a `DestinationChain` under +/// ownership of some `Interior` location of the local chain to a particular `Beneficiary`. +/// +/// This relies on the XCM `TransferAsset` instruction. 
`Beneficiary` must implement +/// `Into<[u8; 32]>` (as 32-byte `AccountId`s generally do), and the actual XCM beneficiary will be +/// the location consisting of a single `AccountId32` junction with an appropriate account and no +/// specific network. +/// +/// `PayOverXcm::pay` is asynchronous, and returns a `QueryId` which can then be used in +/// `check_payment` to check the status of the XCM transaction. +pub type PayAccountId32OnChainOverXcm< + DestinationChain, + Interior, + Router, + Querier, + Timeout, + Beneficiary, + AssetKind, +> = PayOverXcm< + Interior, + Router, + Querier, + Timeout, + Beneficiary, + AssetKind, + crate::AliasesIntoAccountId32<(), Beneficiary>, + FixedLocation<DestinationChain>, +>; + +/// Simple struct which contains both an XCM `location` and `asset_id` to identify an asset which +/// exists on some chain. +pub struct LocatableAssetId { + /// The asset's ID. + pub asset_id: AssetId, + /// The (relative) location in which the asset ID is meaningful. + pub location: MultiLocation, +} + +/// Adapter `struct` which implements a conversion from any `AssetKind` into a [LocatableAssetId] +/// value using a fixed `Location` for the `location` field. +pub struct FixedLocation<Location>(sp_std::marker::PhantomData<Location>); +impl<Location: Get<MultiLocation>, AssetKind: Into<AssetId>> Convert<AssetKind, LocatableAssetId> + for FixedLocation<Location> +{ + fn convert(value: AssetKind) -> LocatableAssetId { + LocatableAssetId { asset_id: value.into(), location: Location::get() } + } +} + +#[test] +fn it_builds() {} diff --git a/xcm/xcm-builder/src/tests/locking.rs b/xcm/xcm-builder/src/tests/locking.rs index 7c408c999b56..f4ef618ac0e7 100644 --- a/xcm/xcm-builder/src/tests/locking.rs +++ b/xcm/xcm-builder/src/tests/locking.rs @@ -136,6 +136,8 @@ fn remote_unlock_roundtrip_should_work() { set_send_price((Parent, 10u128)); // We have been told by Parachain #1 that Account #3 has locked funds which we can unlock. + // Previously, we must have sent a LockAsset instruction to Parachain #1. + // This caused Parachain #1 to send us the NoteUnlockable instruction.
let message = Xcm(vec![NoteUnlockable { asset: (Parent, 100u128).into(), owner: (3u64,).into() }]); let hash = fake_message_hash(&message); @@ -169,8 +171,10 @@ fn remote_unlock_roundtrip_should_work() { assert_eq!(r, Outcome::Complete(Weight::from_parts(40, 40))); assert_eq!(asset_list((3u64,)), vec![(Parent, 990u128).into()]); - let expected_msg = - Xcm::<()>(vec![UnlockAsset { target: (3u64,).into(), asset: (Parent, 100u128).into() }]); + let expected_msg = Xcm::<()>(vec![UnlockAsset { + target: (Parent, Parachain(42), 3u64).into(), + asset: (Parent, 100u128).into(), + }]); let expected_hash = fake_message_hash(&expected_msg); assert_eq!(sent_xcm(), vec![((Parent, Parachain(1)).into(), expected_msg, expected_hash)]); assert_eq!( diff --git a/xcm/xcm-executor/src/lib.rs b/xcm/xcm-executor/src/lib.rs index ce9d3d4644e8..16f53fd6503b 100644 --- a/xcm/xcm-executor/src/lib.rs +++ b/xcm/xcm-executor/src/lib.rs @@ -876,9 +876,11 @@ impl XcmExecutor { RequestUnlock { asset, locker } => { let origin = *self.origin_ref().ok_or(XcmError::BadOrigin)?; let remote_asset = Self::try_reanchor(asset.clone(), &locker)?.0; + let remote_target = Self::try_reanchor_multilocation(origin, &locker)?.0; let reduce_ticket = Config::AssetLocker::prepare_reduce_unlockable(locker, asset, origin)?; - let msg = Xcm::<()>(vec![UnlockAsset { asset: remote_asset, target: origin }]); + let msg = + Xcm::<()>(vec![UnlockAsset { asset: remote_asset, target: remote_target }]); let (ticket, price) = validate_send::(locker, msg)?; self.take_fee(price, FeeReason::RequestUnlock)?; reduce_ticket.enact()?; @@ -990,6 +992,17 @@ impl XcmExecutor { Ok((asset, reanchor_context)) } + fn try_reanchor_multilocation( + location: MultiLocation, + destination: &MultiLocation, + ) -> Result<(MultiLocation, InteriorMultiLocation), XcmError> { + let reanchor_context = Config::UniversalLocation::get(); + let location = location + .reanchored(&destination, reanchor_context) + .map_err(|_| XcmError::ReanchorFailed)?; + Ok((location, reanchor_context)) + } + /// NOTE: Any assets which were unable to be reanchored are introduced into `failed_bin`. fn reanchored( mut assets: Assets, diff --git a/xcm/xcm-executor/src/traits/mod.rs b/xcm/xcm-executor/src/traits/mod.rs index 3b904630d73e..cac9c73ee277 100644 --- a/xcm/xcm-executor/src/traits/mod.rs +++ b/xcm/xcm-executor/src/traits/mod.rs @@ -38,7 +38,7 @@ pub use token_matching::{ Error, MatchesFungible, MatchesFungibles, MatchesNonFungible, MatchesNonFungibles, }; mod on_response; -pub use on_response::{OnResponse, VersionChangeNotifier}; +pub use on_response::{OnResponse, QueryHandler, QueryResponseStatus, VersionChangeNotifier}; mod should_execute; pub use should_execute::{CheckSuspension, Properties, ShouldExecute}; mod transact_asset; diff --git a/xcm/xcm-executor/src/traits/on_response.rs b/xcm/xcm-executor/src/traits/on_response.rs index dcb7d924d799..34bb7eb9597d 100644 --- a/xcm/xcm-executor/src/traits/on_response.rs +++ b/xcm/xcm-executor/src/traits/on_response.rs @@ -14,8 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
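
(Illustrative sketch, not part of the changeset: the `RequestUnlock` handling above now reanchors the unlock `target` into the locker's context instead of passing the requester's origin through unchanged, which is what the updated `remote_unlock_roundtrip_should_work` expectation reflects. The sketch below shows what `reanchored` produces under assumed locations: a requester on parachain 42 asking a locker on sibling parachain 1; all concrete values are illustrative.)

    use xcm::prelude::*;

    fn unlock_target_is_reanchored() {
        // Universal location of the requesting chain (assumed): parachain 42 under Kusama.
        let context: InteriorMultiLocation = X2(GlobalConsensus(NetworkId::Kusama), Parachain(42));
        // The locker, as seen from the requesting chain: a sibling parachain.
        let locker = MultiLocation::new(1, X1(Parachain(1)));
        // The local origin asking for the unlock.
        let origin: MultiLocation = AccountId32 { network: None, id: [3u8; 32] }.into();

        // From the locker's point of view, the target is "up to the relay, down into
        // parachain 42, then the account" - which is what `UnlockAsset` now carries.
        let target = origin.reanchored(&locker, context).expect("compatible locations");
        assert_eq!(
            target,
            MultiLocation::new(1, X2(Parachain(42), AccountId32 { network: None, id: [3u8; 32] })),
        );
    }
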
+use crate::Xcm; +use core::result; +use frame_support::{ + dispatch::fmt::Debug, + pallet_prelude::{Get, TypeInfo}, +}; +use parity_scale_codec::{FullCodec, MaxEncodedLen}; +use sp_arithmetic::traits::Zero; use xcm::latest::{ - Error as XcmError, MultiLocation, QueryId, Response, Result as XcmResult, Weight, XcmContext, + Error as XcmError, InteriorMultiLocation, MultiLocation, QueryId, Response, + Result as XcmResult, Weight, XcmContext, }; /// Define what needs to be done upon receiving a query response. @@ -94,3 +103,63 @@ impl VersionChangeNotifier for () { false } } + +/// The possible state of an XCM query response. +#[derive(Debug, PartialEq, Eq)] +pub enum QueryResponseStatus { + /// The response has arrived, and includes the inner Response and the block number it arrived at. + Ready { response: Response, at: BlockNumber }, + /// The response has not yet arrived, the XCM might still be executing or the response might be in transit. + Pending { timeout: BlockNumber }, + /// No response with the given `QueryId` was found, or the response was already queried and removed from local storage. + NotFound, + /// Got an unexpected XCM version. + UnexpectedVersion, +} + +/// Provides methods to expect responses from XCMs and query their status. +pub trait QueryHandler { + type QueryId: From + + FullCodec + + MaxEncodedLen + + TypeInfo + + Clone + + Eq + + PartialEq + + Debug + + Copy; + type BlockNumber: Zero; + type Error; + type UniversalLocation: Get; + + /// Attempt to create a new query ID and register it as a query that is yet to respond. + fn new_query( + responder: impl Into, + timeout: Self::BlockNumber, + match_querier: impl Into, + ) -> QueryId; + + /// Consume `message` and return another which is equivalent to it except that it reports + /// back the outcome. + /// + /// - `message`: The message whose outcome should be reported. + /// - `responder`: The origin from which a response should be expected. + /// - `timeout`: The block number after which it is permissible to return `NotFound` from `take_response`. + /// + /// `report_outcome` may return an error if the `responder` is not invertible. + /// + /// It is assumed that the querier of the response will be `Here`. + /// The response can be queried with `take_response`. + fn report_outcome( + message: &mut Xcm<()>, + responder: impl Into, + timeout: Self::BlockNumber, + ) -> result::Result; + + /// Attempt to remove and return the response of query with ID `query_id`. + fn take_response(id: Self::QueryId) -> QueryResponseStatus; + + /// Makes sure to expect a response with the given id. 
+ #[cfg(feature = "runtime-benchmarks")] + fn expect_response(id: Self::QueryId, response: Response); +} diff --git a/xcm/xcm-simulator/example/src/lib.rs b/xcm/xcm-simulator/example/src/lib.rs index 33a5b2c70a9f..bd5ebb0b472f 100644 --- a/xcm/xcm-simulator/example/src/lib.rs +++ b/xcm/xcm-simulator/example/src/lib.rs @@ -272,7 +272,7 @@ mod tests { } #[test] - fn remote_locking() { + fn remote_locking_and_unlocking() { MockNet::reset(); let locked_amount = 100; @@ -280,7 +280,7 @@ mod tests { ParaB::execute_with(|| { let message = Xcm(vec![LockAsset { asset: (Here, locked_amount).into(), - unlocker: (Parachain(1),).into(), + unlocker: Parachain(1).into(), }]); assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); }); @@ -306,6 +306,28 @@ mod tests { }])] ); }); + + ParaB::execute_with(|| { + // Request unlocking part of the funds on the relay chain + let message = Xcm(vec![RequestUnlock { + asset: (Parent, locked_amount - 50).into(), + locker: Parent.into(), + }]); + assert_ok!(ParachainPalletXcm::send_xcm(Here, (Parent, Parachain(1)), message)); + }); + + Relay::execute_with(|| { + use pallet_balances::{BalanceLock, Reasons}; + // Lock is reduced + assert_eq!( + relay_chain::Balances::locks(&child_account_id(2)), + vec![BalanceLock { + id: *b"py/xcmlk", + amount: locked_amount - 50, + reasons: Reasons::All + }] + ); + }); } /// Scenario: diff --git a/xcm/xcm-simulator/example/src/parachain.rs b/xcm/xcm-simulator/example/src/parachain.rs index cea5a93ec0af..39a2e27470b2 100644 --- a/xcm/xcm-simulator/example/src/parachain.rs +++ b/xcm/xcm-simulator/example/src/parachain.rs @@ -17,9 +17,10 @@ //! Parachain runtime mock. use codec::{Decode, Encode}; +use core::marker::PhantomData; use frame_support::{ construct_runtime, parameter_types, - traits::{EnsureOrigin, EnsureOriginWithArg, Everything, EverythingBut, Nothing}, + traits::{ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, EverythingBut, Nothing}, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; @@ -27,7 +28,7 @@ use frame_system::EnsureRoot; use sp_core::{ConstU32, H256}; use sp_runtime::{ testing::Header, - traits::{Hash, IdentityLookup}, + traits::{Get, Hash, IdentityLookup}, AccountId32, }; use sp_std::prelude::*; @@ -238,7 +239,7 @@ impl Config for XcmConfig { type Trader = FixedRateOfFungible; type ResponseHandler = (); type AssetTrap = (); - type AssetLocker = (); + type AssetLocker = PolkadotXcm; type AssetExchanger = (); type AssetClaims = (); type SubscriptionService = (); @@ -325,7 +326,7 @@ pub mod mock_msg_queue { Ok(xcm) => { let location = (Parent, Parachain(sender.into())); match T::XcmExecutor::execute_xcm(location, xcm, message_hash, max_weight) { - Outcome::Error(e) => (Err(e.clone()), Event::Fail(Some(hash), e)), + Outcome::Error(e) => (Err(e), Event::Fail(Some(hash), e)), Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), // As far as the caller is concerned, this was dispatched without error, so // we just report the weight used. @@ -349,7 +350,7 @@ pub mod mock_msg_queue { let _ = XcmpMessageFormat::decode(&mut data_ref) .expect("Simulator encodes with versioned xcm format; qed"); - let mut remaining_fragments = &data_ref[..]; + let mut remaining_fragments = data_ref; while !remaining_fragments.is_empty() { if let Ok(xcm) = VersionedXcm::::decode(&mut remaining_fragments) @@ -403,6 +404,22 @@ parameter_types! 
{ pub ReachableDest: Option = Some(Parent.into()); } +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair + for TrustedLockerCase +{ + fn contains(origin: &MultiLocation, asset: &MultiAsset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +parameter_types! { + pub RelayTokenForRelay: (MultiLocation, MultiAssetFilter) = (Parent.into(), Wild(AllOf { id: Concrete(Parent.into()), fun: WildFungible })); +} + +pub type TrustedLockers = TrustedLockerCase; + impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -420,7 +437,7 @@ impl pallet_xcm::Config for Runtime { type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type Currency = Balances; type CurrencyMatcher = (); - type TrustedLockers = (); + type TrustedLockers = TrustedLockers; type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type MaxRemoteLockConsumers = ConstU32<0>; diff --git a/zombienet_tests/functional/0003-beefy-and-mmr.toml b/zombienet_tests/functional/0003-beefy-and-mmr.toml index bea5ac1ba64f..a8d97bc30f85 100644 --- a/zombienet_tests/functional/0003-beefy-and-mmr.toml +++ b/zombienet_tests/functional/0003-beefy-and-mmr.toml @@ -9,8 +9,8 @@ command = "polkadot" [[relaychain.node_groups]] name = "validator" count = 3 -args = ["--log=beefy=debug", "--beefy", "--enable-offchain-indexing=true"] +args = ["--log=beefy=debug", "--enable-offchain-indexing=true"] [[relaychain.nodes]] name = "validator-unstable" -args = ["--log=beefy=debug", "--beefy", "--enable-offchain-indexing=true"] +args = ["--log=beefy=debug", "--enable-offchain-indexing=true"]
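
(Illustrative sketch, not part of the changeset: the `QueryHandler`/`QueryResponseStatus` abstraction added in `on_response.rs` above and implemented by `pallet-xcm` lets a generic component register a query, attach a `ReportError` appendix, and later poll for the result. The sketch is generic over any handler whose `QueryId` is `u64`, as pallet-xcm's is; `query_roundtrip` and its parameters are hypothetical names, and the message content is a placeholder.)

    use xcm::latest::prelude::*;
    use xcm_executor::traits::{QueryHandler, QueryResponseStatus};

    fn query_roundtrip<Q: QueryHandler<QueryId = u64>>(
        responder: MultiLocation,
        timeout: Q::BlockNumber,
    ) -> Result<u64, Q::Error> {
        // Some program whose outcome we care about.
        let mut message = Xcm::<()>(vec![ClearOrigin]);

        // Prepend `SetAppendix(ReportError { .. })` so the responder reports back to us,
        // registering a pending query in the process.
        let query_id = Q::report_outcome(&mut message, responder, timeout)?;
        // ... `message` would now be sent to the responder via the configured router ...

        // Later (e.g. in a following block), poll for the response.
        match Q::take_response(query_id) {
            QueryResponseStatus::Ready { response, at } => {
                // `response` arrived at block `at`; this call removes it from storage.
                let _ = (response, at);
            },
            QueryResponseStatus::Pending { timeout } => {
                // Still in flight; check again before `timeout`.
                let _ = timeout;
            },
            QueryResponseStatus::NotFound | QueryResponseStatus::UnexpectedVersion => {
                // Unknown id, already taken, or an incompatible XCM version.
            },
        }
        Ok(query_id)
    }
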