From 72fb8bd3cd4a5051bb855415b360657d7ce247fb Mon Sep 17 00:00:00 2001 From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com> Date: Fri, 29 Nov 2024 10:33:46 +0000 Subject: [PATCH 01/29] Expose types from `sc-service` (#5855) # Description At Moonbeam we have worked on a `lazy-loading` feature: a client mode that forks a live parachain and fetches its state on demand. We have been able to do this by duplicating some code from `sc_service::client`. The objective of this PR is to simplify the implementation by making some types in polkadot-sdk public. - Modules: - `sc_service::client` **I do not see a point in exposing these types only when the `test-helpers` feature is enabled.** ## Integration Not applicable, the PR just makes some types public. ## Review Notes The changes included in this PR give client developers more flexibility by exposing important types. --- prdoc/pr_5855.prdoc | 15 +++++++ substrate/bin/node/testing/Cargo.toml | 2 +- substrate/client/network/test/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- .../src/chain_head/subscription/inner.rs | 6 +-- .../rpc-spec-v2/src/chain_head/tests.rs | 6 +-- substrate/client/service/Cargo.toml | 2 - substrate/client/service/src/client/client.rs | 40 +------------------ substrate/client/service/src/client/mod.rs | 3 +- substrate/client/service/src/lib.rs | 5 +-- substrate/client/service/test/Cargo.toml | 2 +- .../client/service/test/src/client/mod.rs | 6 +-- substrate/test-utils/client/Cargo.toml | 4 +- substrate/test-utils/runtime/Cargo.toml | 2 +- 14 files changed, 34 insertions(+), 63 deletions(-) create mode 100644 prdoc/pr_5855.prdoc diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc new file mode 100644 index 000000000000..7735cfee9f37 --- /dev/null +++ b/prdoc/pr_5855.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove feature `test-helpers` from sc-service + +doc: + - audience: Node Dev + description: | + Removes feature `test-helpers` from sc-service.
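For downstream users, a minimal sketch of what the now-public module enables, mirroring the `new_with_backend` call sites updated in this patch; `Block`, `RuntimeApi`, `backend`, `executor`, `genesis_block_builder` and `client_config` are assumed to be set up by the embedding node:

```rust
use sc_service::client::new_with_backend;
use sp_core::testing::TaskExecutor;
use std::sync::Arc;

// Build a full client against a custom backend without the `test-helpers`
// feature. The argument order follows the call sites in this patch: backend,
// executor, genesis block builder, spawn handle, prometheus registry,
// telemetry, client config.
let client = Arc::new(
    new_with_backend::<_, _, Block, _, RuntimeApi>(
        backend.clone(),
        executor,
        genesis_block_builder,
        Box::new(TaskExecutor::new()),
        None,
        None,
        client_config,
    )
    .unwrap(),
);
```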
+ +crates: + - name: sc-service + bump: major + - name: sc-rpc-spec-v2 + bump: major diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 16112386ad7c..1972c03a368b 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -37,7 +37,7 @@ sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index ebece1762f29..6340d1dfb2f4 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -33,7 +33,7 @@ sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index b304bc905925..70f68436767f 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -56,7 +56,7 @@ sp-consensus = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } assert_matches = { workspace = true } pretty_assertions = { workspace = true } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 95a7c7fe1832..3e1bd23776d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -784,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_in_mem; + use sc_service::client::new_with_backend; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -811,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), diff --git 
a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c505566d887d..21e8365622a1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,7 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_rpc::testing::TokioTestExecutor; -use sc_service::client::new_in_mem; +use sc_service::client::new_with_backend; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -2547,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TokioTestExecutor::default()), None, None, - Box::new(TokioTestExecutor::default()), client_config, ) .unwrap(), diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index f2fc65ef2439..3981395d9768 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,8 +20,6 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. rocksdb = ["sc-client-db/rocksdb"] -# exposes the client type -test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index ce5b92551bf2..eddbb9260c05 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,10 +85,8 @@ use std::{ sync::Arc, }; -#[cfg(feature = "test-helpers")] -use { - super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, -}; +use super::call_executor::LocalCallExecutor; +use sp_core::traits::CodeExecutor; type NotificationSinks = Mutex>>; @@ -152,39 +150,6 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } - -/// Create an instance of in-memory client. -#[cfg(feature = "test-helpers")] -pub fn new_in_mem( - backend: Arc>, - executor: E, - genesis_block_builder: G, - prometheus_registry: Option, - telemetry: Option, - spawn_handle: Box, - config: ClientConfig, -) -> sp_blockchain::Result< - Client, LocalCallExecutor, E>, Block, RA>, -> -where - E: CodeExecutor + sc_executor::RuntimeVersionOf, - Block: BlockT, - G: BuildGenesisBlock< - Block, - BlockImportOperation = as backend::Backend>::BlockImportOperation, - >, -{ - new_with_backend( - backend, - executor, - genesis_block_builder, - spawn_handle, - prometheus_registry, - telemetry, - config, - ) -} - /// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { @@ -218,7 +183,6 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. 
-#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index ec77a92f162f..3020b3d296f4 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,5 +56,4 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -#[cfg(feature = "test-helpers")] -pub use self::client::{new_in_mem, new_with_backend}; +pub use self::client::new_with_backend; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 9c01d7288a81..b5a38d875e3b 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,14 +23,11 @@ #![recursion_limit = "1024"] pub mod chain_ops; +pub mod client; pub mod config; pub mod error; mod builder; -#[cfg(feature = "test-helpers")] -pub mod client; -#[cfg(not(feature = "test-helpers"))] -mod client; mod metrics; mod task_manager; diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 0edfc5b19314..632b98104f6b 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -31,7 +31,7 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 55bbfcdd8594..ead90c4c65d8 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; +use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -2087,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. 
- let mut client = new_in_mem::<_, Block, _, RuntimeApi>( + let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(); diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index ebd1eab5980d..a67c91fc5f79 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -29,9 +29,7 @@ sc-client-db = { features = [ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { features = [ - "test-helpers", -], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1c82c73072bc..96a888052876 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -45,7 +45,7 @@ sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-trie = { workspace = true } sp-transaction-pool = { workspace = true } trie-db = { workspace = true } -sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sc-service = { optional = true, workspace = true } sp-state-machine = { workspace = true } sp-externalities = { workspace = true } From 1dd21bcc1406e0f07f70e604f9cef4dc2115c989 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 29 Nov 2024 12:00:52 +0100 Subject: [PATCH 02/29] ci: update nightly in ci-unified to 2024-11-19 (#6691) cc https://github.com/paritytech/ci_cd/issues/1088 --- .github/env | 2 +- .gitlab-ci.yml | 2 +- docs/contributor/container.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/env b/.github/env index bb61e1f4cd99..730c37f1db80 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f508404f1efa..42a7e87bda43 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ workflow: variables: # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/docs/contributor/container.md b/docs/contributor/container.md index ec51b8b9d7cc..e387f568d7b5 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ + docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` From 
b3ab312724ee8c3a0c7f3d9b5ea6c98513b5c951 Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Fri, 29 Nov 2024 20:02:59 +0800 Subject: [PATCH 03/29] Migrate pallet-preimage to benchmark v2 (#6277) Part of: - #6202. --------- Co-authored-by: Giuseppe Re Co-authored-by: command-bot <> --- substrate/frame/preimage/src/benchmarking.rs | 296 ++++++++++--------- substrate/frame/preimage/src/weights.rs | 150 +++++----- 2 files changed, 231 insertions(+), 215 deletions(-) diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 3d0c5b900579..ea635bf3ef77 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,14 +17,13 @@ //! Preimage pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Preimage; +use crate::*; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -43,206 +42,225 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -benchmarks! { +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} + +#[benchmarks] +mod benchmarks { + use super::*; + // Expensive note - will reserve. - note_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( + assert_ok!(Pallet::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + note_preimage(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. 
MAX_SIZE; + #[benchmark] + fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + note_preimage(o as T::RuntimeOrigin, preimage); + + assert!(Pallet::::have_preimage(&hash)); } // Expensive unnote - will unreserve. - unnote_preimage { + #[benchmark] + fn unnote_preimage() { let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - }: _(RawOrigin::Signed(caller), hash) - verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hash); + + assert!(!Pallet::::have_preimage(&hash)); } + // Cheap unnote - will not unreserve since there's no deposit held. - unnote_no_deposit_preimage { + #[benchmark] + fn unnote_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: unnote_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + unnote_preimage(o as T::RuntimeOrigin, hash); + + assert!(!Pallet::::have_preimage(&hash)); } // Expensive request - will unreserve the noter's deposit. - request_preimage { + #[benchmark] + fn request_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + + let ticket = + TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { + maybe_ticket: Some((noter, ticket)), + count: 1, + maybe_len: Some(MAX_SIZE), + }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - would unreserve the deposit but none was held. 
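For readers new to the v2 syntax, the pattern applied throughout this diff maps each v1 block `name { setup }: _(origin, args) verify { checks }` onto an attribute-annotated function. A skeleton, using the `note_preimage` case above and assuming the usual pallet benchmarking context:

```rust
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn note_preimage(s: Linear<0, MAX_SIZE>) {
        // Everything before `#[extrinsic_call]` is setup (the old block body).
        let caller = funded_account::<T>();
        let (preimage, hash) = sized_preimage_and_hash::<T>(s);

        // The single measured call; `_` expands to the benchmark's own name.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller), preimage);

        // Everything after it plays the role of the old `verify` section.
        assert!(Pallet::<T>::have_preimage(&hash));
    }
}
```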
- request_no_deposit_preimage { + #[benchmark] + fn request_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + + let s = + RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is not yet noted, so deposit to unreserve. - request_unnoted_preimage { + #[benchmark] + fn request_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is already requested, so just a counter bump. - request_requested_preimage { + #[benchmark] + fn request_requested_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. 
- unrequest_preimage { + #[benchmark] + fn unrequest_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - last reference, but it's not noted. - unrequest_unnoted_preimage { + #[benchmark] + fn unrequest_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - not the last reference. 
- unrequest_multi_referenced_preimage { + #[benchmark] + fn unrequest_multi_referenced_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - ensure_updated { - let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; - + #[benchmark] + fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) { let caller = funded_account::(); let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); - }: _(RawOrigin::Signed(caller), hashes) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hashes); + assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); #[allow(deprecated)] let c = StatusFor::::iter_keys().count(); assert_eq!(c, 0); } - impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); -} - -fn insert_old_unrequested(s: u32) -> ::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index edb2eed9c75a..a3aec7e7546e 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/preimage/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_preimage +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/preimage/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -84,10 +82,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -102,10 +100,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -120,10 +118,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -154,8 +152,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. 
+ Weight::from_parts(27_682_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +165,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,8 +178,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. - Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -193,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -234,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -247,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,10 +265,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+ Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) @@ -295,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -313,10 +311,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,10 +329,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -350,8 +348,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -365,8 +363,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -378,8 +376,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,8 +389,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. 
- Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -404,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -417,8 +415,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -432,8 +430,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -445,8 +443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -458,8 +456,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -478,10 +476,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. + Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) From 447902eff4a574e66894ad60cb41999b05bf5e84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Fri, 29 Nov 2024 13:46:31 +0100 Subject: [PATCH 04/29] pallet_revive: Switch to 64bit RISC-V (#6565) This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). 
Pre-deployed 32bit contracts are now considered defunct since we changed how parameters are passed for functions with more than 6 arguments. ## Fixtures The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. This also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. ## Syscall interface ### Passing pointers Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before, we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` arguments by truncating the register. This is done since the memory space of PolkaVM is 32bit. ### Functions with more than 6 arguments We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as a SCALE-encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space, and those extra bytes led to an out-of-bounds access. This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C`-aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. ## TODO - [ ] Add a multi-block migration that wipes all existing contracts, as we made breaking changes to the syscall interface --------- Co-authored-by: GitHub Action --- .github/workflows/checks-quick.yml | 1 - Cargo.lock | 72 +++++++------- prdoc/pr_6565.prdoc | 35 +++++++ substrate/frame/revive/Cargo.toml | 2 +- substrate/frame/revive/fixtures/Cargo.toml | 4 +- substrate/frame/revive/fixtures/build.rs | 96 +++++++++++++------ .../build/{Cargo.toml => _Cargo.toml} | 5 +- .../fixtures/build/_rust-toolchain.toml | 4 + .../riscv32emac-unknown-none-polkavm.json | 26 ----- substrate/frame/revive/fixtures/src/lib.rs | 13 +-- substrate/frame/revive/proc-macro/src/lib.rs | 91 ++++++++++-------- substrate/frame/revive/rpc/src/tests.rs | 6 ++ substrate/frame/revive/src/chain_extension.rs | 12 +-- substrate/frame/revive/src/limits.rs | 21 +++- substrate/frame/revive/src/wasm/mod.rs | 20 +++- substrate/frame/revive/src/wasm/runtime.rs | 33 ++----- substrate/frame/revive/uapi/Cargo.toml | 6 +- substrate/frame/revive/uapi/src/host.rs | 4 +- .../uapi/src/host/{riscv32.rs => riscv64.rs} | 86 ++++++++--------- substrate/frame/revive/uapi/src/lib.rs | 6 ++ 20 files changed, 309 insertions(+), 234 deletions(-) create mode 100644 prdoc/pr_6565.prdoc rename substrate/frame/revive/fixtures/build/{Cargo.toml => _Cargo.toml} (80%) create mode 100644 substrate/frame/revive/fixtures/build/_rust-toolchain.toml delete mode 100644 substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json rename substrate/frame/revive/uapi/src/host/{riscv32.rs => riscv64.rs} (93%) diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index c733a2517cb8..4c26b85a6303 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -97,7 +97,6 @@ jobs: --exclude "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common"
"substrate/frame/revive/fixtures/contracts/common" - name: deny git deps run: python3 .github/scripts/deny-git-deps.py . diff --git a/Cargo.lock b/Cargo.lock index 84477cd05416..e1abeea49283 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5975,6 +5975,15 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -14646,7 +14655,7 @@ dependencies = [ "pallet-utility 28.0.0", "parity-scale-codec", "paste", - "polkavm 0.13.0", + "polkavm 0.17.0", "pretty_assertions", "rlp 0.6.1", "scale-info", @@ -14742,12 +14751,10 @@ dependencies = [ "anyhow", "frame-system 28.0.0", "log", - "parity-wasm", - "polkavm-linker 0.14.0", + "polkavm-linker 0.17.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "tempfile", "toml 0.8.12", ] @@ -14864,7 +14871,7 @@ dependencies = [ "bitflags 1.3.2", "parity-scale-codec", "paste", - "polkavm-derive 0.14.0", + "polkavm-derive 0.17.0", "scale-info", ] @@ -19699,15 +19706,15 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e" +checksum = "84979be196ba2855f73616413e7b1d18258128aa396b3dc23f520a00a807720e" dependencies = [ "libc", "log", - "polkavm-assembler 0.13.0", - "polkavm-common 0.13.0", - "polkavm-linux-raw 0.13.0", + "polkavm-assembler 0.17.0", + "polkavm-common 0.17.0", + "polkavm-linux-raw 0.17.0", ] [[package]] @@ -19730,9 +19737,9 @@ dependencies = [ [[package]] name = "polkavm-assembler" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf" +checksum = "0ba7b434ff630b0f73a1560e8baea807246ca22098abe49f97821e0e2d2accc4" dependencies = [ "log", ] @@ -19764,20 +19771,14 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c" +checksum = "8f0dbafef4ab6ceecb4982ac3b550df430ef4f9fdbf07c108b7d4f91a0682fce" dependencies = [ "log", - "polkavm-assembler 0.13.0", + "polkavm-assembler 0.17.0", ] -[[package]] -name = "polkavm-common" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711952a783e9c5ad407cdacb1ed147f36d37c5d43417c1091d86456d2999417b" - [[package]] name = "polkavm-derive" version = "0.8.0" @@ -19807,11 +19808,11 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4832a0aebf6cefc988bb7b2d74ea8c86c983164672e2fc96300f356a1babfc1" +checksum = "c0c3dbb6c8c7bd3e5f5b05aa7fc9355acf14df7ce5d392911e77d01090a38d0d" dependencies = [ - "polkavm-derive-impl-macro 0.14.0", + "polkavm-derive-impl-macro 0.17.0", ] [[package]] @@ -19852,11 +19853,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e339fc7c11310fe5adf711d9342278ac44a75c9784947937cce12bd4f30842f2" +checksum = 
"42565aed4adbc4034612d0b17dea8db3681fb1bd1aed040d6edc5455a9f478a1" dependencies = [ - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -19894,11 +19895,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b569754b15060d03000c09e3bf11509d527f60b75d79b4c30c3625b5071d9702" +checksum = "86d9838e95241b0bce4fe269cdd4af96464160505840ed5a8ac8536119ba19e2" dependencies = [ - "polkavm-derive-impl 0.14.0", + "polkavm-derive-impl 0.17.0", "syn 2.0.87", ] @@ -19934,15 +19935,16 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0959ac3b0f4fd5caf5c245c637705f19493efe83dba31a83bbba928b93b0116a" +checksum = "d359dc721d2cc9b555ebb3558c305112ddc5bdac09d26f95f2f7b49c1f2db7e9" dependencies = [ + "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -19961,9 +19963,9 @@ checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126" [[package]] name = "polkavm-linux-raw" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9" +checksum = "e64c3d93a58ffbc3099d1227f0da9675a025a9ea6c917038f266920c1de1e568" [[package]] name = "polling" diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc new file mode 100644 index 000000000000..f9a75a16a6a7 --- /dev/null +++ b/prdoc/pr_6565.prdoc @@ -0,0 +1,35 @@ +title: 'pallet_revive: Switch to 64bit RISC-V' +doc: +- audience: Runtime Dev + description: |- + This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. + + Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changes how parameters are passed for functions with more than 6 arguments. + + ## Fixtures + + The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. + + ## Syscall interface + + ### Passing pointer + + Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` argument by truncating the register. This is done since the memory space of PolkaVM is 32bit. + + ### Functions with more than 6 arguments + + We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as SCALE encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes lead to an out of bound access. 
+ + This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. + + ## TODO + - [ ] Add multi block migration that wipes all existing contracts as we made breaking changes to the syscall interface +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 81fbbc8cf38e..677ef0e1367f 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] environmental = { workspace = true } paste = { workspace = true } -polkavm = { version = "0.13.0", default-features = false } +polkavm = { version = "0.17.0", default-features = false } bitflags = { workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive"], workspace = true } diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 7a5452853d65..798ed8c75a5a 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -18,10 +18,8 @@ anyhow = { workspace = true, default-features = true, optional = true } log = { workspace = true } [build-dependencies] -parity-wasm = { workspace = true } -tempfile = { workspace = true } toml = { workspace = true } -polkavm-linker = { version = "0.14.0" } +polkavm-linker = { version = "0.17.0" } anyhow = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 3472e0846efd..46cd5760ca4e 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -20,7 +20,8 @@ use anyhow::Result; use anyhow::{bail, Context}; use std::{ - cfg, env, fs, + env, fs, + io::Write, path::{Path, PathBuf}, process::Command, }; @@ -82,7 +83,7 @@ fn create_cargo_toml<'a>( entries: impl Iterator, output_dir: &Path, ) -> Result<()> { - let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?; + let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?; let mut set_dep = |name, path| -> Result<()> { cargo_toml["dependencies"][name]["path"] = toml::Value::String( fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), @@ -108,21 +109,24 @@ fn create_cargo_toml<'a>( let cargo_toml = toml::to_string_pretty(&cargo_toml)?; fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone()) .with_context(|| format!("Failed to write {cargo_toml:?}"))?; + fs::copy( + fixtures_dir.join("build/_rust-toolchain.toml"), + output_dir.join("rust-toolchain.toml"), + ) + .context("Failed to write toolchain file")?; Ok(()) } -fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { +fn invoke_build(current_dir: &Path) -> Result<()> { let encoded_rustflags = ["-Dwarnings"].join("\x1f"); - let mut build_command = Command::new(env::var("CARGO")?); + let mut build_command = Command::new("cargo"); build_command .current_dir(current_dir) .env_clear() .env("PATH", env::var("PATH").unwrap_or_default()) .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) - .env("RUSTC_BOOTSTRAP", "1") .env("RUSTUP_HOME", 
env::var("RUSTUP_HOME").unwrap_or_default()) - .env("RUSTUP_TOOLCHAIN", env::var("RUSTUP_TOOLCHAIN").unwrap_or_default()) .args([ "build", "--release", @@ -130,7 +134,7 @@ fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { "-Zbuild-std-features=panic_immediate_abort", ]) .arg("--target") - .arg(target); + .arg(polkavm_linker::target_json_64_path().unwrap()); if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) { build_command.env("RUSTUP_TOOLCHAIN", &toolchain); @@ -168,7 +172,7 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result for entry in entries { post_process( &build_dir - .join("target/riscv32emac-unknown-none-polkavm/release") + .join("target/riscv64emac-unknown-none-polkavm/release") .join(entry.name()), &out_dir.join(entry.out_filename()), )?; @@ -177,11 +181,61 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result Ok(()) } +/// Create a directory in the `target` as output directory +fn create_out_dir() -> Result { + let temp_dir: PathBuf = env::var("OUT_DIR")?.into(); + + // this is set in case the user has overriden the target directory + let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") { + path.into() + } else { + // otherwise just traverse up from the out dir + let mut out_dir: PathBuf = temp_dir.clone(); + loop { + if !out_dir.pop() { + bail!("Cannot find project root.") + } + if out_dir.join("Cargo.lock").exists() { + break; + } + } + out_dir.join("target") + } + .join("pallet-revive-fixtures"); + + // clean up some leftover symlink from previous versions of this script + if out_dir.exists() && !out_dir.is_dir() { + fs::remove_file(&out_dir)?; + } + fs::create_dir_all(&out_dir).context("Failed to create output directory")?; + + // write the location of the out dir so it can be found later + let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) + .context("Failed to create fixture_location.rs")?; + write!( + file, + r#" + #[allow(dead_code)] + const FIXTURE_DIR: &str = "{0}"; + macro_rules! 
fixture {{ + ($name: literal) => {{ + include_bytes!(concat!("{0}", "/", $name, ".polkavm")) + }}; + }} + "#, + out_dir.display() + ) + .context("Failed to write to fixture_location.rs")?; + + Ok(out_dir) +} + pub fn main() -> Result<()> { let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); let contracts_dir = fixtures_dir.join("contracts"); - let out_dir: PathBuf = env::var("OUT_DIR")?.into(); - let target = fixtures_dir.join("riscv32emac-unknown-none-polkavm.json"); + let out_dir = create_out_dir().context("Cannot determine output directory")?; + let build_dir = out_dir.join("build"); + fs::create_dir_all(&build_dir).context("Failed to create build directory")?; println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}"); println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}"); @@ -199,25 +253,9 @@ pub fn main() -> Result<()> { return Ok(()) } - let tmp_dir = tempfile::tempdir()?; - let tmp_dir_path = tmp_dir.path(); - - create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; - invoke_build(&target, tmp_dir_path)?; - - write_output(tmp_dir_path, &out_dir, entries)?; - - #[cfg(unix)] - if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") { - let symlink_dir: PathBuf = symlink_dir.into(); - let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures"); - if symlink_dir.is_symlink() { - fs::remove_file(&symlink_dir) - .with_context(|| format!("Failed to remove_file {symlink_dir:?}"))?; - } - std::os::unix::fs::symlink(&out_dir, &symlink_dir) - .with_context(|| format!("Failed to symlink {out_dir:?} -> {symlink_dir:?}"))?; - } + create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?; + invoke_build(&build_dir)?; + write_output(&build_dir, &out_dir, entries)?; Ok(()) } diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml similarity index 80% rename from substrate/frame/revive/fixtures/build/Cargo.toml rename to substrate/frame/revive/fixtures/build/_Cargo.toml index 5d0e256e2e73..beaabd83403e 100644 --- a/substrate/frame/revive/fixtures/build/Cargo.toml +++ b/substrate/frame/revive/fixtures/build/_Cargo.toml @@ -4,6 +4,9 @@ publish = false version = "1.0.0" edition = "2021" +# Make sure this is not included into the workspace +[workspace] + # Binary targets are injected dynamically by the build script. 
[[bin]] @@ -11,7 +14,7 @@ edition = "2021" [dependencies] uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.14.0" } +polkavm-derive = { version = "0.17.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml new file mode 100644 index 000000000000..4c757c708d58 --- /dev/null +++ b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2024-11-19" +components = ["rust-src"] +profile = "minimal" diff --git a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json deleted file mode 100644 index bbd54cdefbac..000000000000 --- a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "arch": "riscv32", - "cpu": "generic-rv32", - "crt-objects-fallback": "false", - "data-layout": "e-m:e-p:32:32-i64:64-n32-S32", - "eh-frame-header": false, - "emit-debug-gdb-scripts": false, - "features": "+e,+m,+a,+c,+lui-addi-fusion,+fast-unaligned-access,+xtheadcondmov", - "linker": "rust-lld", - "linker-flavor": "ld.lld", - "llvm-abiname": "ilp32e", - "llvm-target": "riscv32", - "max-atomic-width": 32, - "panic-strategy": "abort", - "relocation-model": "pie", - "target-pointer-width": "32", - "singlethread": true, - "pre-link-args": { - "ld": [ - "--emit-relocs", - "--unique", - "--relocatable" - ] - }, - "env": "polkavm" -} diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index cc84daec9b59..24f6ee547dc7 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -19,10 +19,13 @@ extern crate alloc; +// generated file that tells us where to find the fixtures +include!(concat!(env!("OUT_DIR"), "/fixture_location.rs")); + /// Load a given wasm module and returns a wasm binary contents along with it's hash. #[cfg(feature = "std")] pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H256)> { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = FIXTURE_DIR.into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; @@ -36,12 +39,6 @@ pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H /// available in no-std environments (runtime benchmarks). pub mod bench { use alloc::vec::Vec; - - macro_rules! 
fixture { - ($name: literal) => { - include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) - }; - } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -61,7 +58,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 7232c6342824..6814add128d9 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -79,6 +79,7 @@ use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, F /// - `Result<(), TrapReason>`, /// - `Result`, /// - `Result`. +/// - `Result`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment> where E: Ext` @@ -127,6 +128,7 @@ struct HostFn { enum HostFnReturn { Unit, U32, + U64, ReturnCode, } @@ -134,8 +136,7 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - Self::U32 => quote! { |ret_val| Some(ret_val) }, - Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, + _ => quote! { |ret_val| Some(ret_val.into()) }, } } @@ -143,6 +144,7 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, + Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! { -> ReturnErrorCode }, } } @@ -243,7 +245,8 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result, - - Result"#; + - Result, + - Result"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -305,6 +308,7 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), + "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; @@ -339,50 +343,61 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: u32 = 6; - let mut registers_used = 0; - let mut bindings = vec![]; - let mut idx = 0; - for (name, ty) in param_names.clone().zip(param_types.clone()) { + const ALLOWED_REGISTERS: usize = 6; + + // all of them take one register but we truncate them before passing into the function + // it is important to not allow any type which has illegal bit patterns like 'bool' + if !param_types.clone().all(|ty| { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = if ident == "i8" || - ident == "i16" || - ident == "i32" || - ident == "u8" || - ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; - registers_used += size; - if registers_used > ALLOWED_REGISTERS { - return quote! 
{ - let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; - } - } - let this_reg = quote::format_ident!("__a{}__", idx); - let next_reg = quote::format_ident!("__a{}__", idx + 1); - let binding = if size == 1 { + matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") + }) { + panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); + } + + // too many arguments: pass as pointer to a struct in memory + if param_names.clone().count() > ALLOWED_REGISTERS { + let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { quote! { - let #name = #this_reg as #ty; + #name: #ty, } - } else { - quote! { - let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); + }); + return quote! { + #[derive(Default)] + #[repr(C)] + struct Args { + #(#fields)* } - }; - bindings.push(binding); - idx += size; + let Args { #(#param_names,)* } = { + let len = ::core::mem::size_of::(); + let mut args = Args::default(); + let ptr = &mut args as *mut Args as *mut u8; + // Safety + // 1. The struct is initialized at all times. + // 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe. + // 3. The reference doesn't outlive the args field. + // 4. There is only the single reference to the args field. + // 5. The length of the generated slice is the same as the struct. + let reference = unsafe { + ::core::slice::from_raw_parts_mut(ptr, len) + }; + memory.read_into_buf(__a0__ as _, reference)?; + args + }; + } } + + // otherwise: one argument per register + let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| { + let reg = quote::format_ident!("__a{}__", idx); + quote! { + let #name = #reg as #ty; + } + }); quote! { #( #bindings )* } @@ -409,7 +424,7 @@ fn expand_env(def: &EnvDef) -> TokenStream2 { memory: &mut M, __syscall_symbol__: &[u8], __available_api_version__: ApiVersion, - ) -> Result, TrapReason> + ) -> Result, TrapReason> { #impls } diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index 7734c8c57209..920318b26f71 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -218,6 +218,8 @@ async fn deploy_and_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn revert_call() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -240,6 +242,8 @@ async fn revert_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn event_logs() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -279,6 +283,8 @@ async fn invalid_transaction() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn native_evm_ratio_works() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index ccea12945054..5b3e886a5628 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 +75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::{Decode, MaxEncodedLen}; +use codec::Decode; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,16 +304,6 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { 
Ok(()) } - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// This function is secure and recommended for all input types of fixed size - /// as long as the cost of reading the memory is included in the overall already charged - /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. - pub fn read_as(&mut self) -> Result { - self.memory.read_as(self.input_ptr) - } - /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 64e66382b9ab..5ce96f59c14d 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -129,23 +129,36 @@ pub mod code { Error::::CodeRejected })?; + if !program.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + // This scans the whole program but we only do it once on code deployment. // It is safe to do unchecked math in u32 because the size of the program // was already checked above. - use polkavm::program::ISA32_V1_NoSbrk as ISA; + use polkavm::program::ISA64_V1 as ISA; let mut num_instructions: u32 = 0; let mut max_basic_block_size: u32 = 0; let mut basic_block_size: u32 = 0; for inst in program.instructions(ISA) { + use polkavm::program::Instruction; num_instructions += 1; basic_block_size += 1; if inst.kind.opcode().starts_new_basic_block() { max_basic_block_size = max_basic_block_size.max(basic_block_size); basic_block_size = 0; } - if matches!(inst.kind, polkavm::program::Instruction::invalid) { - log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) + match inst.kind { + Instruction::invalid => { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + Instruction::sbrk(_, _) => { + log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + _ => (), } } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index f10c4f5fddf8..d87ec7112286 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -293,8 +293,15 @@ impl WasmBlob { ) -> Result, ExecError> { let mut config = polkavm::Config::default(); config.set_backend(Some(polkavm::BackendKind::Interpreter)); - let engine = - polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed"); + config.set_cache_enabled(false); + #[cfg(feature = "std")] + if std::env::var_os("REVIVE_USE_COMPILER").is_some() { + config.set_backend(Some(polkavm::BackendKind::Compiler)); + } + let engine = polkavm::Engine::new(&config).expect( + "on-chain (no_std) use of interpreter is hard-coded. + interpreter is available on all platforms; qed", + ); let mut module_config = polkavm::ModuleConfig::new(); module_config.set_page_size(limits::PAGE_SIZE); @@ -306,6 +313,15 @@ impl WasmBlob { Error::::CodeRejected })?; + // This is checked at deploy time but we also want to reject pre-existing + // 32bit programs. + // TODO: Remove when we reset the test net. 
+ // https://github.com/paritytech/contract-issues/issues/11 + if !module.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 3e2c83db1ebd..7ea518081e23 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -27,7 +27,7 @@ use crate::{ Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode}; use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, @@ -126,34 +126,13 @@ pub trait Memory { /// /// # Note /// - /// There must be an extra benchmark for determining the influence of `len` with - /// regard to the overall weight. + /// Make sure to charge a proportional amount of weight if `len` is not fixed. fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } - - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// # Only use on fixed size types - /// - /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise - /// this implementation will out of bound access the buffer declared by the guest. Some examples - /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded - /// integer. - /// - /// # Note - /// - /// The weight of reading a fixed value is included in the overall weight of any - /// contract callable function. - fn read_as(&self, ptr: u32) -> Result { - let buf = self.read(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) - .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - Ok(decoded) - } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -164,8 +143,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); - fn write_output(&mut self, output: u32); + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); + fn write_output(&mut self, output: u64); } // Memory implementation used in benchmarking where guest memory is mapped into the host. 
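The prdoc at the top of this patch explains why arguments now travel either one per 64-bit register or behind a single pointer to a `#[repr(C)]` struct. A stand-alone sketch of that convention, with every name invented for illustration (only the shapes mirror the pallet's host-side decoding), could look like this:

```rust
// Illustrative sketch of the syscall ABI: up to six unsigned-integer
// arguments arrive one per 64-bit register; longer argument lists are
// passed as a pointer (in A0) to a #[repr(C)] struct in guest memory.
#[repr(C)]
#[derive(Default)]
#[allow(dead_code)]
struct SevenArgs {
    a: u64,
    b: u64,
    c: u32,
    d: u32,
    e: u64,
    f: u64,
    g: u64,
}

// Small syscall: the host truncates each full register to the declared type.
fn decode_small(regs: (u64, u64, u64, u64, u64, u64)) -> (u32, u64) {
    (regs.0 as u32, regs.1)
}

// Large syscall: the host copies guest memory over a zero-initialized
// struct. `read_guest_memory` stands in for the generated `read_into_buf`.
fn decode_large(a0: u64, read_guest_memory: impl Fn(u64, &mut [u8])) -> SevenArgs {
    let mut args = SevenArgs::default();
    // SAFETY: all fields are primitive unsigned integers, so every bit
    // pattern is a valid value and the struct stays initialized while its
    // bytes are overwritten in place.
    let bytes = unsafe {
        core::slice::from_raw_parts_mut(
            &mut args as *mut SevenArgs as *mut u8,
            core::mem::size_of::<SevenArgs>(),
        )
    };
    read_guest_memory(a0, bytes);
    args
}

fn main() {
    // Register-passed arguments.
    assert_eq!(decode_small((7, 1_000, 0, 0, 0, 0)), (7, 1_000));

    // Pointer-passed arguments, with a Vec standing in for guest memory
    // (little-endian layout, as on the riscv64 target).
    let mut guest = vec![0u8; core::mem::size_of::<SevenArgs>()];
    guest[..8].copy_from_slice(&42u64.to_le_bytes());
    let args = decode_large(0, |_ptr, buf| buf.copy_from_slice(&guest));
    assert_eq!(args.a, 42);
}
```

The `#[repr(C)]` layout is what lets the compiler compute deterministic, aligned field offsets; the `#[repr(packed)]` structs replaced elsewhere in this patch would instead force the unaligned accesses the prdoc warns about.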
@@ -214,7 +193,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -225,7 +204,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u32) { + fn write_output(&mut self, output: u64) { self.set_reg(polkavm::Reg::A0, output); } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 0c7461a35d69..b55391dd5d6c 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -20,11 +20,11 @@ codec = { features = [ "max-encoded-len", ], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.14.0" } +[target.'cfg(target_arch = "riscv64")'.dependencies] +polkavm-derive = { version = "0.17.0" } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +default-target = ["riscv64imac-unknown-none-elf"] [features] default = ["scale"] diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 6b3a8b07f040..d3fd4ac8d03e 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -14,8 +14,8 @@ use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; use paste::paste; -#[cfg(target_arch = "riscv32")] -mod riscv32; +#[cfg(target_arch = "riscv64")] +mod riscv64; macro_rules! hash_fn { ( $name:ident, $bytes:literal ) => { diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs similarity index 93% rename from substrate/frame/revive/uapi/src/host/riscv32.rs rename to substrate/frame/revive/uapi/src/host/riscv64.rs index e8b27057ed18..3cba14db6a04 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -26,10 +26,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u32,); + type Regs = (u64,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0) + ReturnCode(a0 as _) } } @@ -207,33 +207,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { - code_hash: *const u8, + code_hash: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - address: *const u8, - output: *mut u8, - output_len: *mut u32, - salt: *const u8, + address: u32, + output: u32, + output_len: u32, + salt: u32, } let args = Args { - code_hash: code_hash.as_ptr(), + code_hash: code_hash.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - address, - output: output_ptr, - output_len: &mut output_len as *mut _, - salt: salt_ptr, + address: address as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, + salt: salt_ptr as _, }; let ret_code = { unsafe { sys::instantiate(&args as *const Args as *const _) } }; @@ -257,31 +257,31 @@ impl HostFn for 
HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - callee: *const u8, + callee: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr(), + callee: callee.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -308,29 +308,29 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - address: *const u8, + address: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - input: *const u8, + deposit_limit: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - address: address.as_ptr(), + address: address.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index e660ce36ef75..91c2543bb719 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -65,6 +65,12 @@ impl From for u32 { } } +impl From for u64 { + fn from(error: ReturnErrorCode) -> Self { + u32::from(error).into() + } +} + define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. From 1e89a311471eba937a9552d7d1f55af1661feb08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 29 Nov 2024 14:09:49 +0100 Subject: [PATCH 05/29] Fix runtime api impl detection by construct runtime (#6665) Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. 
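For readers unfamiliar with the trick, a minimal self-contained sketch of autoref-based specialization follows. The two trait names match this patch, but the bodies and the `Vec<&'static str>` return type are illustrative stand-ins for the generated code, which returns `RuntimeApiMetadataIR` values:

```rust
struct Runtime;

// Fallback emitted by `construct_runtime!` for `&Runtime`; it is only
// reachable via an extra autoref, so it loses whenever a "real" impl exists.
#[doc(hidden)]
#[allow(dead_code)]
trait InternalConstructRuntime {
    #[inline(always)]
    fn runtime_metadata(&self) -> Vec<&'static str> {
        Vec::new()
    }
}
impl InternalConstructRuntime for &Runtime {}

// What `impl_runtime_apis!` effectively emits for the runtime type itself.
#[doc(hidden)]
trait InternalImplRuntimeApis {
    fn runtime_metadata(&self) -> Vec<&'static str>;
}
impl InternalImplRuntimeApis for Runtime {
    fn runtime_metadata(&self) -> Vec<&'static str> {
        vec!["Core", "Metadata"]
    }
}

fn main() {
    // Method resolution on `&Runtime` matches the impl on `Runtime`
    // (receiver `&self`) before autoref-ing to `&&Runtime`, so the real
    // metadata wins; without `impl_runtime_apis!` the fallback quietly
    // returns an empty Vec instead of failing to compile.
    assert_eq!((&Runtime).runtime_metadata(), vec!["Core", "Metadata"]);
}
```

Because both macros must name the exact same trait for this resolution order to apply, moving `InternalImplRuntimeApis` into `sp-metadata-ir`, a crate both macro expansions can reference, is what makes detection work across files.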
Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 --------- Co-authored-by: GitHub Action --- Cargo.lock | 1 + prdoc/pr_6665.prdoc | 15 ++++++ .../src/construct_runtime/expand/metadata.rs | 2 + .../procedural/src/construct_runtime/mod.rs | 3 +- .../support/test/tests/runtime_metadata.rs | 49 ++++++++++--------- .../api/proc-macro/src/runtime_metadata.rs | 6 +-- substrate/primitives/api/test/Cargo.toml | 3 +- .../api/test/tests/decl_and_impl.rs | 2 + substrate/primitives/metadata-ir/src/lib.rs | 10 ++++ 9 files changed, 62 insertions(+), 29 deletions(-) create mode 100644 prdoc/pr_6665.prdoc diff --git a/Cargo.lock b/Cargo.lock index e1abeea49283..5e4e9c267b08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25548,6 +25548,7 @@ dependencies = [ "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc new file mode 100644 index 000000000000..b5aaf8a3b184 --- /dev/null +++ b/prdoc/pr_6665.prdoc @@ -0,0 +1,15 @@ +title: Fix runtime api impl detection by construct runtime +doc: +- audience: Runtime Dev + description: |- + Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. + + + Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 +crates: +- name: frame-support-procedural + bump: patch +- name: sp-api-proc-macro + bump: patch +- name: sp-metadata-ir + bump: patch diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 4590a3a7f490..0b3bd5168865 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -113,6 +113,8 @@ pub fn expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 17042c248780..087faf37252d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,7 +466,6 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
- #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -477,6 +476,8 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #outer_event #outer_error diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 7523a415d458..a098643abb91 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! { } } -sp_api::impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } +// Module to emulate having the implementation in a different file. +mod apis { + use super::{Block, BlockT, Runtime}; - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + sp_api::impl_runtime_apis! { + impl crate::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn something_with_block(_: Block) -> Block { + unimplemented!() + } - fn same_name() {} + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn wild_card(_: u32) {} - } + fn same_name() {} - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() + fn wild_card(_: u32) {} } - fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - unimplemented!() + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() + } } } } diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 6be396339259..1706f8ca6fbb 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -298,18 +298,14 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { #crate_::vec![ #( #metadata, )* ] } } - #[doc(hidden)] - impl InternalImplRuntimeApis for #runtime_name {} } )) } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 1d21f23eb804..27f6dafa24bf 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -21,6 +21,7 @@ sp-version = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-metadata-ir = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } codec = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } @@ -40,5 +41,5 @@ name = "bench" harness = false [features] -"enable-staging-api" = [] +enable-staging-api = [] disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index 890cf6eccdbc..2e5a078cb382 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ 
b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -309,6 +309,8 @@ fn mock_runtime_api_works_with_advanced() { #[test] fn runtime_api_metadata_matches_version_implemented() { + use sp_metadata_ir::InternalImplRuntimeApis; + let rt = Runtime {}; let runtime_metadata = rt.runtime_metadata(); diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index bf234432a1a6..dc01f7eaadb3 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -87,6 +87,16 @@ pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } +/// INTERNAL USE ONLY +/// +/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to +/// fetch the runtime api metadata without exploding when there is no runtime api implementation +/// available. +#[doc(hidden)] +pub trait InternalImplRuntimeApis { + fn runtime_metadata(&self) -> alloc::vec::Vec; +} + #[cfg(test)] mod test { use super::*; From 4e7c968ae97c66812df989117ad251cba3864632 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:49:45 +0200 Subject: [PATCH 06/29] archive: Refactor `archive_storage` method into subscription (#6483) This PR adapts the `archive_storage` implementation from a method to a subscription. This keeps the archive APIs uniform and consistent. Builds on: https://github.com/paritytech/polkadot-sdk/pull/5997 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../client/rpc-spec-v2/src/archive/api.rs | 13 +- .../client/rpc-spec-v2/src/archive/archive.rs | 202 +++---- .../src/archive/archive_storage.rs | 105 +--- .../client/rpc-spec-v2/src/archive/mod.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 500 +++++++----------- .../rpc-spec-v2/src/chain_head/event.rs | 3 +- .../client/rpc-spec-v2/src/common/events.rs | 59 ++- .../client/rpc-spec-v2/src/common/storage.rs | 151 ++++-- substrate/client/service/src/builder.rs | 2 - 9 files changed, 458 insertions(+), 579 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index dcfeaecb147b..a205d0502c93 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -20,8 +20,7 @@ use crate::{ common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, }, MethodResult, }; @@ -100,13 +99,17 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); /// Returns the storage difference between two blocks. 
/// diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 55054d91d85d..62e44a016241 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -20,13 +20,13 @@ use crate::{ archive::{ - archive_storage::{ArchiveStorage, ArchiveStorageDiff}, - error::Error as ArchiveError, - ArchiveApiServer, + archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, }, - common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, + storage::{QueryResult, StorageSubscriptionClient}, }, hex_string, MethodResult, SubscriptionTaskExecutor, }; @@ -57,42 +57,12 @@ use tokio::sync::mpsc; pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; -/// The configuration of [`Archive`]. -pub struct ArchiveConfig { - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - pub max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - pub max_queried_items: usize, -} - -/// The maximum number of items the `archive_storage` can return for a descendant query before -/// pagination is required. -/// -/// Note: this is identical to the `chainHead` value. -const MAX_DESCENDANT_RESPONSES: usize = 5; - -/// The maximum number of queried items allowed for the `archive_storage` at a time. -/// -/// Note: A queried item can also be a descendant query which can return up to -/// `MAX_DESCENDANT_RESPONSES`. -const MAX_QUERIED_ITEMS: usize = 8; - /// The buffer capacity for each storage query. /// /// This is small because the underlying JSON-RPC server has /// its down buffer capacity per connection as well. const STORAGE_QUERY_BUF: usize = 16; -impl Default for ArchiveConfig { - fn default() -> Self { - Self { - max_descendant_responses: MAX_DESCENDANT_RESPONSES, - max_queried_items: MAX_QUERIED_ITEMS, - } - } -} - /// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { /// Substrate client. @@ -103,11 +73,6 @@ pub struct Archive, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, /// Phantom member to pin the block type. 
_phantom: PhantomData, } @@ -119,18 +84,9 @@ impl, Block: BlockT, Client> Archive { backend: Arc, genesis_hash: GenesisHash, executor: SubscriptionTaskExecutor, - config: ArchiveConfig, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - executor, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -260,47 +216,53 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) - } + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) }) - }) - .collect::, ArchiveError>>()?; + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); + let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); - Ok(storage_client.handle_query(hash, items, child_trie)) + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } fn archive_unstable_storage_diff( @@ -337,24 +299,74 @@ where // - process_events might encounter an error (if the client disconnected) // - storage_fut might encounter an error while processing a trie queries and // the error is propagated via the sink. 
- let _ = futures::future::join(storage_fut, process_events(&mut rx, &mut sink)).await; + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } } -/// Sends all the events to the sink. -async fn process_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { - while let Some(event) = rx.recv().await { - if event.is_done() { - log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); - } else if event.is_err() { - log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); + } + + if sink.send(&event).await.is_err() { + return + } + } } + } +} + +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select! { + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; + + match event { + Ok(None) => continue, + + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, - if sink.send(&event).await.is_err() { - return + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } } } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 5a3920882f00..390db765a48f 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -33,114 +33,13 @@ use crate::{ common::{ events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageResult, - PaginatedStorageQuery, StorageQueryType, StorageResult, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, }, - storage::{IterQueryType, QueryIter, Storage}, + storage::Storage, }, }; use tokio::sync::mpsc; -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. - client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, -} - -impl ArchiveStorage { - /// Constructs a new [`ArchiveStorage`]. 
- pub fn new( - client: Arc, - storage_max_descendant_responses: usize, - storage_max_queried_items: usize, - ) -> Self { - Self { - client: Storage::new(client), - storage_max_descendant_responses, - storage_max_queried_items, - } - } -} - -impl ArchiveStorage -where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: StorageProvider + 'static, -{ - /// Generate the response of the `archive_storage` method. - pub fn handle_query( - &self, - hash: Block::Hash, - mut items: Vec>, - child_key: Option, - ) -> ArchiveStorageResult { - let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); - items.truncate(self.storage_max_queried_items); - - let mut storage_results = Vec::with_capacity(items.len()); - for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::DescendantsValues => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::DescendantsHashes => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } - }, - }; - } - - ArchiveStorageResult::ok(storage_results, discarded_items) - } -} - /// Parse hex-encoded string parameter as raw bytes. /// /// If the parsing fails, returns an error propagated to the RPC method. 
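To see the new shape of the API from a client's perspective, here is a hypothetical jsonrpsee-based consumer (not part of this PR): the subscribe and unsubscribe method names come from the `api.rs` change above, and the query encoding is assumed to follow the usual spec-v2 `{ "key", "type" }` form used by the tests below.

```rust
use futures::StreamExt;
use jsonrpsee::{core::client::SubscriptionClientT, rpc_params};
use serde_json::{json, Value};

// Streams all descendant values of `key` at `block_hash`, printing each
// `archive_unstable_storageEvent` notification until the server closes the
// stream with the terminal event (`ArchiveStorageEvent::StorageDone` in this
// diff) or an error event.
async fn stream_descendant_values(
    client: &impl SubscriptionClientT,
    block_hash: &str,
    key: &str,
) -> anyhow::Result<()> {
    let mut sub = client
        .subscribe::<Value, _>(
            "archive_unstable_storage",
            rpc_params![block_hash, vec![json!({ "key": key, "type": "descendantsValues" })]],
            "archive_unstable_stopStorage",
        )
        .await?;

    while let Some(event) = sub.next().await {
        // Each notification carries a single storage result instead of a
        // slice of a paginated response, so there is no
        // `pagination_start_key` left to thread through.
        println!("{}", event?);
    }
    Ok(())
}
```

Backpressure falls out of this design for free: results are produced through a small bounded channel (`STORAGE_QUERY_BUF`) and sent one event at a time, which is why the pagination machinery and its limits could be deleted wholesale above.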
diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 5f020c203eab..14fa104c113a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::{Archive, ArchiveConfig}; +pub use archive::Archive; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 994c5d28bd61..cddaafde6659 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -19,16 +19,13 @@ use crate::{ common::events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageMethodOk, - ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, StorageResultType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, + StorageQueryType, StorageResult, StorageResultType, }, hex_string, MethodResult, }; -use super::{ - archive::{Archive, ArchiveConfig}, - *, -}; +use super::{archive::Archive, *}; use assert_matches::assert_matches; use codec::{Decode, Encode}; @@ -55,8 +52,6 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; -const MAX_PAGINATION_LIMIT: usize = 5; -const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -65,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; -fn setup_api( - max_descendant_responses: usize, - max_queried_items: usize, -) -> (Arc>, RpcModule>>) { +fn setup_api() -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -83,7 +75,6 @@ fn setup_api( backend, CHAIN_GENESIS, Arc::new(TokioTestExecutor::default()), - ArchiveConfig { max_descendant_responses, max_queried_items }, ) .into_rpc(); @@ -101,7 +92,7 @@ async fn get_next_event(sub: &mut RpcSubscriptio #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_client, api) = setup_api(); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -110,7 +101,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -144,7 +135,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); @@ -178,7 +169,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -190,7 +181,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -296,7 +287,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -355,7 +346,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -369,42 +360,23 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsHashes, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, ]; - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - // Key has not been imported yet. - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + // Key has not been imported yet. + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); // Import a block with the given key value pair. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -420,32 +392,103 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 4); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, key); - assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); - assert_eq!(result[1].key, key); - assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); - assert_eq!(result[2].key, key); - assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); - assert_eq!(result[3].key, key); - assert_eq!(result[3].result, StorageResultType::Value(expected_value)); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value), + child_trie_key: None, + }), + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_hashes_values_child_trie() { + let (client, api) = setup_api(); + + // Get child storage values set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let key = hex_string(&KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storage", + rpc_params![&genesis_hash, items, &child_info], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); } #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); /// The core of this test. /// @@ -457,55 +500,47 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key with descendant. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key that are not part of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, ] ], @@ -513,19 +548,21 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let merkle_values: HashMap<_, _> = match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. 
}) => result - .into_iter() - .map(|res| { - let value = match res.result { + let mut merkle_values = HashMap::new(); + loop { + let event = get_next_event::(&mut sub).await; + match event { + ArchiveStorageEvent::Storage(result) => { + let str_result = match result.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected StorageResultType"), + _ => panic!("Unexpected result type"), }; - (res.key, value) - }) - .collect(), - _ => panic!("Unexpected result"), - }; + merkle_values.insert(result.key, str_result); + }, + ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), + ArchiveStorageEvent::StorageDone => break, + } + } // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -604,9 +641,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_paginate_iterations() { +async fn archive_storage_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -625,237 +662,94 @@ async fn archive_storage_paginate_iterations() { // Calling with an invalid hash. let invalid_hash = hex_string(&INVALID_HASH); - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Err(_) => (), - _ => panic!("Unexpected result"), - }; - - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; - - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":m")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":mo")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); - }, - _ => panic!("Unexpected result"), - }; - - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mo")), }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":moD")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); - }, - _ => panic!("Unexpected result"), - }; - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moD")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":moc")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); - }, - _ => panic!("Unexpected result"), - }; + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(_) + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( + // Valid call with storage at the key. + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moc")), }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":mock")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":m"), + result: StorageResultType::Value(hex_string(b"a")), + child_trie_key: None, + }) + ); - // Continue with pagination until no keys are returned. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mock")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; -} + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mo"), + result: StorageResultType::Value(hex_string(b"ab")), + child_trie_key: None, + }) + ); -#[tokio::test] -async fn archive_storage_discarded_items() { - // One query at a time - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moD"), + result: StorageResultType::Value(hex_string(b"abcmoD")), + child_trie_key: None, + }) + ); - // Import a new block with storage changes. - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap(); - builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); - let block = builder.build().unwrap().block; - let block_hash = format!("{:?}", block.header.hash()); - client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moc"), + result: StorageResultType::Value(hex_string(b"abc")), + child_trie_key: None, + }) + ); - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![ - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - } - ] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 2); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mock"), + result: StorageResultType::Value(hex_string(b"abcd")), + child_trie_key: None, + }) + ); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); } #[tokio::test] async fn archive_storage_diff_main_trie() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -965,7 +859,7 @@ async fn archive_storage_diff_main_trie() { #[tokio::test] async fn archive_storage_diff_no_changes() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Build 2 identical blocks. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -1012,7 +906,7 @@ async fn archive_storage_diff_no_changes() { #[tokio::test] async fn archive_storage_diff_deleted_changes() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Blocks are imported as forks. let mut builder = BlockBuilderBuilder::new(&*client) @@ -1079,7 +973,7 @@ async fn archive_storage_diff_deleted_changes() { #[tokio::test] async fn archive_storage_diff_invalid_params() { let invalid_hash = hex_string(&INVALID_HASH); - let (_, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_, api) = setup_api(); // Invalid shape for parameters. let items: Vec> = Vec::new(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index bd9863060910..de74145a3f08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_call` method. +/// The response of the `chainHead_storage` method. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,6 +536,7 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), + child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index 198a60bf4cac..44f722c0c61b 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,6 +78,10 @@ pub struct StorageResult { /// The result of the query. #[serde(flatten)] pub result: StorageResultType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, } /// The type of the storage query. @@ -105,23 +109,41 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum ArchiveStorageResult { +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { /// Query generated a result. - Ok(ArchiveStorageMethodOk), + Storage(StorageResult), /// Query encountered an error. - Err(ArchiveStorageMethodErr), + StorageError(ArchiveStorageMethodErr), + /// Operation storage is done. + StorageDone, } -impl ArchiveStorageResult { - /// Create a new `ArchiveStorageResult::Ok` result. - pub fn ok(result: Vec, discarded_items: usize) -> Self { - Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) +impl ArchiveStorageEvent { + /// Create a new `ArchiveStorageEvent::StorageErr` event. + pub fn err(error: String) -> Self { + Self::StorageError(ArchiveStorageMethodErr { error }) } - /// Create a new `ArchiveStorageResult::Err` result. - pub fn err(error: String) -> Self { - Self::Err(ArchiveStorageMethodErr { error }) + /// Create a new `ArchiveStorageEvent::StorageResult` event. + pub fn result(result: StorageResult) -> Self { + Self::Storage(result) + } + + /// Checks if the event is a `StorageDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDone) + } + + /// Checks if the event is a `StorageErr` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageError(_)) + } + + /// Checks if the event is a `StorageResult` event. 
+ pub fn is_result(&self) -> bool { + matches!(self, Self::Storage(_)) } } @@ -354,8 +376,11 @@ mod tests { #[test] fn storage_result() { // Item with Value. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -365,8 +390,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -379,6 +407,7 @@ mod tests { let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index 673e20b2bc78..a1e34d51530e 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; use tokio::sync::mpsc; -use super::events::{StorageResult, StorageResultType}; +use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -70,9 +70,6 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result, String>; -/// The result of iterating over keys. -pub type QueryIterResult = Result<(Vec, Option), String>; - impl Storage where Block: BlockT + 'static, @@ -97,6 +94,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -120,6 +118,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -149,6 +148,7 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) @@ -199,56 +199,6 @@ where } } - /// Iterate over at most the provided number of keys. - /// - /// Returns the storage result with a potential next key to resume iteration. 
- pub fn query_iter_pagination( - &self, - query: QueryIter, - hash: Block::Hash, - child_key: Option<&ChildInfo>, - count: usize, - ) -> QueryIterResult { - let QueryIter { ty, query_key, pagination_start_key } = query; - - let mut keys_iter = if let Some(child_key) = child_key { - self.client.child_storage_keys( - hash, - child_key.to_owned(), - Some(&query_key), - pagination_start_key.as_ref(), - ) - } else { - self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) - } - .map_err(|err| err.to_string())?; - - let mut ret = Vec::with_capacity(count); - let mut next_pagination_key = None; - for _ in 0..count { - let Some(key) = keys_iter.next() else { break }; - - next_pagination_key = Some(key.clone()); - - let result = match ty { - IterQueryType::Value => self.query_value(hash, &key, child_key), - IterQueryType::Hash => self.query_hash(hash, &key, child_key), - }?; - - if let Some(value) = result { - ret.push(value); - } - } - - // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|_| QueryIter { - ty, - query_key, - pagination_start_key: next_pagination_key, - }); - Ok((ret, maybe_next_query)) - } - /// Raw iterator over the keys. pub fn raw_keys_iter( &self, @@ -264,3 +214,96 @@ where keys_iter.map_err(|err| err.to_string()) } } + +/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. +pub struct StorageSubscriptionClient { + /// Storage client. + client: Storage, + _phandom: PhantomData<(BE, Block)>, +} + +impl Clone for StorageSubscriptionClient { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient { + /// Constructs a new [`StorageSubscriptionClient`]. + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: StorageProvider + Send + Sync + 'static, +{ + /// Generate storage events to the provided sender. 
+	pub async fn generate_events(
+		&mut self,
+		hash: Block::Hash,
+		items: Vec<StorageQuery<String>>,
+		child_key: Option<ChildInfo>,
+		tx: mpsc::Sender<QueryResult>,
+	) -> Result<(), tokio::task::JoinError> {
+		let this = self.clone();
+
+		tokio::task::spawn_blocking(move || {
+			for item in items {
+				match item.query_type {
+					StorageQueryType::Value => {
+						let rp = this.client.query_value(hash, &item.key, child_key.as_ref());
+						if tx.blocking_send(rp).is_err() {
+							break;
+						}
+					},
+					StorageQueryType::Hash => {
+						let rp = this.client.query_hash(hash, &item.key, child_key.as_ref());
+						if tx.blocking_send(rp).is_err() {
+							break;
+						}
+					},
+					StorageQueryType::ClosestDescendantMerkleValue => {
+						let rp =
+							this.client.query_merkle_value(hash, &item.key, child_key.as_ref());
+						if tx.blocking_send(rp).is_err() {
+							break;
+						}
+					},
+					StorageQueryType::DescendantsValues => {
+						let query = QueryIter {
+							query_key: item.key,
+							ty: IterQueryType::Value,
+							pagination_start_key: None,
+						};
+						this.client.query_iter_pagination_with_producer(
+							query,
+							hash,
+							child_key.as_ref(),
+							&tx,
+						)
+					},
+					StorageQueryType::DescendantsHashes => {
+						let query = QueryIter {
+							query_key: item.key,
+							ty: IterQueryType::Hash,
+							pagination_start_key: None,
+						};
+						this.client.query_iter_pagination_with_producer(
+							query,
+							hash,
+							child_key.as_ref(),
+							&tx,
+						)
+					},
+				}
+			}
+		})
+		.await?;
+
+		Ok(())
+	}
+}
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 027a444012af..a47a05c0a190 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -756,8 +756,6 @@ where
 			backend.clone(),
 			genesis_hash,
 			task_executor.clone(),
-			// Defaults to sensible limits for the `Archive`.
-			sc_rpc_spec_v2::archive::ArchiveConfig::default(),
 		)
 		.into_rpc();
 	rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?;

From 1d519a1054d2edb8fc0b868eba6318fb3d448b33 Mon Sep 17 00:00:00 2001
From: Pavlo Khrystenko <45178695+pkhry@users.noreply.github.com>
Date: Fri, 29 Nov 2024 16:24:58 +0100
Subject: [PATCH 07/29] Update scale-info to 2.11.6 (#6681)

# Description

Updates scale-info from 2.11.5 to 2.11.6, so that generated code is annotated with `allow(deprecated)`.

Pre-requisite for https://github.com/paritytech/polkadot-sdk/pull/6312

---
 Cargo.lock          |   8 +-
 Cargo.toml          |   2 +-
 prdoc/pr_6681.prdoc | 406 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 411 insertions(+), 5 deletions(-)
 create mode 100644 prdoc/pr_6681.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 5e4e9c267b08..1fe2d766f16a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -23715,9 +23715,9 @@ dependencies = [

 [[package]]
 name = "scale-info"
-version = "2.11.5"
+version = "2.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b"
+checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b"
 dependencies = [
 "bitvec",
 "cfg-if",
@@ -23729,9 +23729,9 @@ dependencies = [

 [[package]]
 name = "scale-info-derive"
-version = "2.11.5"
+version = "2.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951"
+checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf"
 dependencies = [
 "proc-macro-crate 3.1.0",
 "proc-macro2 1.0.86",

diff --git a/Cargo.toml b/Cargo.toml
index 964964908a9b..ecc385504181 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1197,7 +1197,7 @@ sc-tracing-proc-macro = { path = 
"substrate/client/tracing/proc-macro", default- sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } sc-utils = { path = "substrate/client/utils", default-features = false } -scale-info = { version = "2.11.1", default-features = false } +scale-info = { version = "2.11.6", default-features = false } schemars = { version = "0.8.13", default-features = false } schnellru = { version = "0.2.3" } schnorrkel = { version = "0.11.4", default-features = false } diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc new file mode 100644 index 000000000000..93a967d4a66c --- /dev/null +++ b/prdoc/pr_6681.prdoc @@ -0,0 +1,406 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: update scale-info to 2.11.6 + +doc: + - audience: Runtime Dev + description: | + Updates scale-info to 2.11.1 from 2.11.5. + Updated version of scale-info annotates generated code with `allow(deprecated)` + +crates: + - name: bridge-runtime-common + bump: none + - name: bp-header-chain + bump: none + - name: bp-runtime + bump: none + - name: frame-support + bump: none + - name: sp-core + bump: none + - name: sp-trie + bump: none + - name: sp-runtime + bump: none + - name: sp-application-crypto + bump: none + - name: sp-arithmetic + bump: none + - name: sp-weights + bump: none + - name: sp-api + bump: none + - name: sp-metadata-ir + bump: none + - name: sp-version + bump: none + - name: sp-inherents + bump: none + - name: frame-executive + bump: none + - name: frame-system + bump: none + - name: pallet-balances + bump: none + - name: frame-benchmarking + bump: none + - name: pallet-migrations + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-slots + bump: none + - name: sp-staking + bump: none + - name: staging-xcm + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: pallet-message-queue + bump: none + - name: polkadot-runtime-common + bump: none + - name: frame-election-provider-support + bump: none + - name: sp-npos-elections + bump: none + - name: sp-consensus-grandpa + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-grandpa + bump: none + - name: sp-genesis-builder + bump: none + - name: sp-consensus-babe + bump: none + - name: sp-mixnet + bump: none + - name: sc-rpc-api + bump: none + - name: sp-session + bump: none + - name: sp-statement-store + bump: none + - name: sp-transaction-storage-proof + bump: none + - name: pallet-asset-rate + bump: none + - name: pallet-authorship + bump: none + - name: pallet-babe + bump: none + - name: pallet-session + bump: none + - name: pallet-timestamp + bump: none + - name: pallet-offences + bump: none + - name: pallet-staking + bump: none + - name: pallet-bags-list + bump: none + - name: pallet-broker + bump: none + - name: pallet-election-provider-multi-phase + bump: none + - name: pallet-fast-unstake + bump: none + - name: pallet-identity + bump: none + - name: pallet-transaction-payment + bump: none + - name: pallet-treasury + bump: none + - name: 
pallet-utility + bump: none + - name: pallet-collective + bump: none + - name: pallet-root-testing + bump: none + - name: pallet-vesting + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: pallet-authority-discovery + bump: none + - name: pallet-mmr + bump: none + - name: sp-mmr-primitives + bump: none + - name: staging-xcm-executor + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-asset-conversion + bump: none + - name: pallet-assets + bump: none + - name: pallet-salary + bump: none + - name: pallet-ranked-collective + bump: none + - name: pallet-xcm + bump: none + - name: xcm-runtime-apis + bump: none + - name: pallet-grandpa + bump: none + - name: pallet-indices + bump: none + - name: pallet-sudo + bump: none + - name: sp-consensus-beefy + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: pallet-aura + bump: none + - name: sp-consensus-aura + bump: none + - name: pallet-collator-selection + bump: none + - name: pallet-glutton + bump: none + - name: staging-parachain-info + bump: none + - name: westend-runtime + bump: none + - name: frame-metadata-hash-extension + bump: none + - name: frame-system-benchmarking + bump: none + - name: pallet-beefy + bump: none + - name: pallet-beefy-mmr + bump: none + - name: pallet-conviction-voting + bump: none + - name: pallet-scheduler + bump: none + - name: pallet-preimage + bump: none + - name: pallet-delegated-staking + bump: none + - name: pallet-nomination-pools + bump: none + - name: pallet-democracy + bump: none + - name: pallet-elections-phragmen + bump: none + - name: pallet-membership + bump: none + - name: pallet-multisig + bump: none + - name: polkadot-sdk-frame + bump: none + - name: pallet-dev-mode + bump: none + - name: pallet-verify-signature + bump: none + - name: pallet-nomination-pools-benchmarking + bump: none + - name: pallet-offences-benchmarking + bump: none + - name: pallet-im-online + bump: none + - name: pallet-parameters + bump: none + - name: pallet-proxy + bump: none + - name: pallet-recovery + bump: none + - name: pallet-referenda + bump: none + - name: pallet-society + bump: none + - name: pallet-state-trie-migration + bump: none + - name: pallet-whitelist + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: rococo-runtime + bump: none + - name: pallet-bounties + bump: none + - name: pallet-child-bounties + bump: none + - name: pallet-nis + bump: none + - name: pallet-tips + bump: none + - name: parachains-common + bump: none + - name: pallet-asset-tx-payment + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: bp-xcm-bridge-hub-router + bump: none + - name: pallet-xcm-bridge-hub-router + bump: none + - name: assets-common + bump: none + - name: bp-messages + bump: none + - name: bp-parachains + bump: none + - name: bp-polkadot-core + bump: none + - name: bp-relayers + bump: none + - name: bp-xcm-bridge-hub + bump: none + - name: bridge-hub-common + bump: none + - name: snowbridge-core + bump: none + - name: snowbridge-beacon-primitives + bump: none + - name: snowbridge-ethereum + bump: none + - name: pallet-bridge-grandpa + bump: none + - name: pallet-bridge-messages + bump: none + - name: pallet-bridge-parachains + bump: none + - name: pallet-bridge-relayers + bump: none + - name: pallet-xcm-bridge-hub + bump: none + - name: cumulus-pallet-dmp-queue + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - 
name: cumulus-ping + bump: none + - name: frame-benchmarking-pallet-pov + bump: none + - name: pallet-alliance + bump: none + - name: pallet-asset-conversion-ops + bump: none + - name: pallet-asset-conversion-tx-payment + bump: none + - name: pallet-assets-freezer + bump: none + - name: pallet-atomic-swap + bump: none + - name: pallet-collective-content + bump: none + - name: pallet-contracts + bump: none + - name: pallet-contracts-uapi + bump: none + - name: pallet-insecure-randomness-collective-flip + bump: none + - name: pallet-contracts-mock-network + bump: none + - name: xcm-simulator + bump: none + - name: pallet-core-fellowship + bump: none + - name: pallet-lottery + bump: none + - name: pallet-mixnet + bump: none + - name: pallet-nft-fractionalization + bump: none + - name: pallet-nfts + bump: none + - name: pallet-node-authorization + bump: none + - name: pallet-paged-list + bump: none + - name: pallet-remark + bump: none + - name: pallet-revive + bump: none + - name: pallet-revive-uapi + bump: none + - name: pallet-revive-eth-rpc + bump: none + - name: pallet-skip-feeless-payment + bump: none + - name: pallet-revive-mock-network + bump: none + - name: pallet-root-offences + bump: none + - name: pallet-safe-mode + bump: none + - name: pallet-scored-pool + bump: none + - name: pallet-statement + bump: none + - name: pallet-transaction-storage + bump: none + - name: pallet-tx-pause + bump: none + - name: pallet-uniques + bump: none + - name: snowbridge-outbound-queue-merkle-tree + bump: none + - name: snowbridge-pallet-ethereum-client + bump: none + - name: snowbridge-pallet-inbound-queue + bump: none + - name: snowbridge-router-primitives + bump: none + - name: snowbridge-pallet-outbound-queue + bump: none + - name: snowbridge-pallet-system + bump: none + - name: bp-asset-hub-rococo + bump: none + - name: bp-asset-hub-westend + bump: none + - name: bp-polkadot-bulletin + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: penpal-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: xcm-simulator-example + bump: none \ No newline at end of file From 5ad8780b653350050c6a854205de20c439aa7b65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexandre=20R=2E=20Bald=C3=A9?= Date: Fri, 29 Nov 2024 19:35:06 +0000 Subject: [PATCH 08/29] People chain integration tests (#6377) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Made as a follow-up of https://github.com/polkadot-fellows/runtimes/pull/499 ## Integration N/A ## Review Notes N/A --------- Co-authored-by: Dónal Murray --- Cargo.lock | 1 + .../tests/people/people-westend/Cargo.toml | 1 + .../people-westend/src/tests/governance.rs | 503 ++++++++++++++++++ .../people/people-westend/src/tests/mod.rs | 1 + 4 files changed, 506 insertions(+) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs diff --git a/Cargo.lock b/Cargo.lock index 1fe2d766f16a..a945d148e051 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16662,6 
+16662,7 @@ dependencies = [ "sp-runtime 31.0.1", "staging-xcm 7.0.0", "staging-xcm-executor 7.0.0", + "westend-runtime", "westend-runtime-constants 7.0.0", "westend-system-emulated-network", ] diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index aa6eebc5458f..53acd038cdf5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -21,6 +21,7 @@ sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } westend-runtime-constants = { workspace = true, default-features = true } +westend-runtime = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs new file mode 100644 index 000000000000..3dadcdd94870 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs @@ -0,0 +1,503 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::imports::*; +use frame_support::traits::ProcessMessageError; + +use codec::Encode; +use frame_support::sp_runtime::traits::Dispatchable; +use parachains_common::AccountId; +use people_westend_runtime::people::IdentityInfo; +use westend_runtime::governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin; +use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime; + +use pallet_identity::Data; + +use emulated_integration_tests_common::accounts::{ALICE, BOB}; + +#[test] +fn relay_commands_add_registrar() { + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_registrar_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_registrar_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + let mut signed_origin = true; + + for (origin_kind, origin) in origins { + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_registrar_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + if signed_origin { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {}, + ] + ); + } else { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + } + }); + + signed_origin = false; + } +} + +#[test] +fn relay_commands_kill_identity() { + // To kill an identity, first one must be set + PeopleWestend::execute_with(|| { + type PeopleRuntime = ::Runtime; + type PeopleRuntimeEvent = ::RuntimeEvent; + + let people_westend_alice = + ::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE)); + + let identity_info = IdentityInfo { + email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()), + ..Default::default() + }; + let identity: Box<::IdentityInformation> = + Box::new(identity_info); + + assert_ok!(::Identity::set_identity( + people_westend_alice, + identity + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. 
}) => {}, + ] + ); + }); + + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: kill_identity_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_kill_identity_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(BOB); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: kill_identity_call.encode().into() } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} + +#[test] +fn relay_commands_add_remove_username_authority() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + let people_westend_bob = PeopleWestend::account_id_of(BOB); + + let (origin_kind, origin, usr) = + (OriginKind::Superuser, ::RuntimeOrigin::root(), "rootusername"); + + // First, add a username authority. 
+ Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_username_authority = + PeopleCall::Identity(pallet_identity::Call::::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_username_authority.encode().into() } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); + + // Now, use the previously added username authority to concede a username to an account. + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = ::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(::Identity::set_username_for( + ::RuntimeOrigin::signed(people_westend_alice.clone()), + people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()), + full_username, + None, + true + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {}, + ] + ); + }); + + // Accept the given username + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = ::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(::Identity::accept_username( + ::RuntimeOrigin::signed(people_westend_bob.clone()), + full_username.try_into().unwrap(), + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {}, + ] + ); + }); + + // Now, remove the username authority with another priviledged XCM call. + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: remove_username_authority.encode().into() } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + // Final event check. + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_remove_username_authority_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice.clone()), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let add_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { origin_kind, call: add_username_authority.encode().into() } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + let remove_authority_xcm_msg = + RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: remove_username_authority.encode().into(), + } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs index 08749b295dc2..b9ad9e3db467 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs @@ -14,4 +14,5 @@ // limitations under the License. 
 mod claim_assets;
+mod governance;
 mod teleport;

From 8eac4e887c827ea0bac8915901c305a05457a8d9 Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Fri, 29 Nov 2024 23:28:34 +0200
Subject: [PATCH 09/29] network/libp2p-backend: Suppress warning adding already reserved node as reserved (#6703)

Fixes https://github.com/paritytech/polkadot-sdk/issues/6598.

---------

Co-authored-by: GitHub Action
---
 prdoc/pr_6703.prdoc                                  | 7 +++++++
 substrate/client/network/src/protocol_controller.rs | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_6703.prdoc

diff --git a/prdoc/pr_6703.prdoc b/prdoc/pr_6703.prdoc
new file mode 100644
index 000000000000..2dd0962a3eea
--- /dev/null
+++ b/prdoc/pr_6703.prdoc
@@ -0,0 +1,7 @@
+title: 'network/libp2p-backend: Suppress warning adding already reserved node as reserved'
+doc:
+- audience: Node Dev
+  description: Fixes https://github.com/paritytech/polkadot-sdk/issues/6598.
+crates:
+- name: sc-network
+  bump: patch

diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs
index af7adb50907f..11f5321294d0 100644
--- a/substrate/client/network/src/protocol_controller.rs
+++ b/substrate/client/network/src/protocol_controller.rs
@@ -464,7 +464,7 @@ impl ProtocolController {
 	/// maintain connections with such peers.
 	fn on_add_reserved_peer(&mut self, peer_id: PeerId) {
 		if self.reserved_nodes.contains_key(&peer_id) {
-			warn!(
+			debug!(
 				target: LOG_TARGET,
 				"Trying to add an already reserved node {peer_id} as reserved on {:?}.",
 				self.set_id,

From 5e0bcb0ee9788b7bb16ccfbda4fdc153b24c6386 Mon Sep 17 00:00:00 2001
From: eskimor
Date: Sat, 30 Nov 2024 00:31:27 +0100
Subject: [PATCH 10/29] Let's be a bit less strict here. (#6662)

This might actually happen in non-malicious cases.
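A hedged sketch of the effect, for reviewers. The variant names come from the
`error.rs` hunk below; the `is_malicious` helper name is assumed from the
surrounding code, and the assertion is illustrative rather than a test added
by this patch:

```rust
// A duplicate advertisement no longer counts as malicious, so the peer
// is not reported for it; only genuine mismatches still are.
let err = SecondingError::Duplicate;
assert!(!err.is_malicious()); // before this patch, this returned `true`
```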
Co-authored-by: eskimor
---
 polkadot/node/network/collator-protocol/src/error.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs
index ae7f9a8c1fbc..598cdcf43900 100644
--- a/polkadot/node/network/collator-protocol/src/error.rs
+++ b/polkadot/node/network/collator-protocol/src/error.rs
@@ -122,7 +122,7 @@ impl SecondingError {
 			PersistedValidationDataMismatch |
 				CandidateHashMismatch |
 				RelayParentMismatch |
-				Duplicate | ParentHeadDataMismatch |
+				ParentHeadDataMismatch |
 				InvalidCoreIndex(_, _) |
 				InvalidSessionIndex(_, _) |
 				InvalidReceiptVersion(_)

From d1fafa85fa1254af143b8e9b0ebf5d2731f8d91a Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Sun, 1 Dec 2024 17:30:09 +0100
Subject: [PATCH 11/29] [pallet-revive] eth-prc fix geth diff (#6608)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add a bunch of differential tests to ensure that responses from eth-rpc
  match the ones from `geth`
  - These [tests](https://github.com/paritytech/polkadot-sdk/blob/pg/fix-geth-diff/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts)
    are not run in CI for now but can be run locally with

    ```bash
    cd revive/rpc/examples/js
    bun test
    ```

* The EVM RPC server does not fail `gas_estimation` if no gas is specified, so
  pallet-revive gains an extra `skip_transfer` boolean check to replicate this
  behavior in our pallet

* The `eth_transact` and `bare_eth_transact` APIs have been updated to take a
  `GenericTransaction` directly, as this is what `eth_estimateGas` and
  `eth_call` use

## TODO

- [ ] Add tests for the new `skip_transfer` flag

---------

Co-authored-by: GitHub Action
Co-authored-by: Alexander Theißen
---
 Cargo.lock                                    |   1 +
 .../assets/asset-hub-westend/src/lib.rs       |  30 +-
 prdoc/pr_6608.prdoc                           |  14 +
 substrate/bin/node/runtime/src/lib.rs         |  25 +-
 substrate/frame/revive/Cargo.toml             |   1 +
 .../frame/revive/mock-network/src/tests.rs    |   4 +-
 substrate/frame/revive/rpc/Cargo.toml         |   2 +-
 .../revive/rpc/examples/js/abi/errorTester.ts | 106 ++++++
 .../revive/rpc/examples/js/abi/event.json     |  34 --
 .../frame/revive/rpc/examples/js/abi/event.ts |  34 ++
 .../revive/rpc/examples/js/abi/piggyBank.json |  65 ----
 .../piggyBank.ts}                             |  19 +-
 .../revive/rpc/examples/js/abi/revert.json    |  14 -
 .../frame/revive/rpc/examples/js/bun.lockb    | Bin 45391 -> 33662 bytes
 .../rpc/examples/js/contracts/.solhint.json   |   3 +
 .../rpc/examples/js/contracts/ErrorTester.sol |  51 +++
 .../rpc/examples/js/contracts/PiggyBank.sol   |   8 +-
 .../frame/revive/rpc/examples/js/package.json |  41 ++-
 .../rpc/examples/js/pvm/errorTester.polkavm   | Bin 0 -> 12890 bytes
 .../revive/rpc/examples/js/src/balance.ts     |   8 +
 .../rpc/examples/js/src/build-contracts.ts    |  27 +-
 .../frame/revive/rpc/examples/js/src/event.ts |  40 +-
 .../rpc/examples/js/src/geth-diff-setup.ts    | 162 ++++++++
 .../rpc/examples/js/src/geth-diff.test.ts     | 245 +++++++++++++
 .../frame/revive/rpc/examples/js/src/lib.ts   | 126 ++++---
 .../revive/rpc/examples/js/src/piggy-bank.ts  |  81 +++-
 .../revive/rpc/examples/js/src/revert.ts      |  10 -
 .../revive/rpc/examples/js/src/transfer.ts    |  15 +-
 .../js/types/ethers-contracts/Event.ts        | 117 ------
 .../js/types/ethers-contracts/PiggyBank.ts    |  96 -----
 .../js/types/ethers-contracts/Revert.ts       |  78 ----
 .../js/types/ethers-contracts/common.ts       | 100 -----
 .../factories/Event__factory.ts               |  51 ---
 .../factories/Revert__factory.ts              |  31 --
 .../types/ethers-contracts/factories/index.ts |   6 -
.../js/types/ethers-contracts/index.ts | 10 - .../frame/revive/rpc/revive_chain.metadata | Bin 658056 -> 659977 bytes substrate/frame/revive/rpc/src/client.rs | 125 ++++--- substrate/frame/revive/rpc/src/lib.rs | 44 +-- .../frame/revive/rpc/src/rpc_methods_gen.rs | 1 + .../frame/revive/rpc/src/subxt_client.rs | 12 +- substrate/frame/revive/rpc/src/tests.rs | 3 +- .../frame/revive/src/benchmarking/mod.rs | 2 +- .../frame/revive/src/evm/api/rlp_codec.rs | 18 +- .../frame/revive/src/evm/api/rpc_types.rs | 148 ++++---- .../frame/revive/src/evm/api/rpc_types_gen.rs | 24 +- substrate/frame/revive/src/evm/runtime.rs | 345 ++++++++++-------- substrate/frame/revive/src/exec.rs | 73 +++- substrate/frame/revive/src/lib.rs | 215 +++++++---- substrate/frame/revive/src/primitives.rs | 45 ++- substrate/frame/revive/src/storage/meter.rs | 52 ++- .../frame/revive/src/test_utils/builder.rs | 11 +- substrate/frame/revive/src/tests.rs | 12 +- .../frame/revive/src/tests/test_debug.rs | 5 +- substrate/frame/revive/src/wasm/mod.rs | 11 +- 55 files changed, 1553 insertions(+), 1248 deletions(-) create mode 100644 prdoc/pr_6608.prdoc create mode 100644 substrate/frame/revive/rpc/examples/js/abi/errorTester.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/event.json create mode 100644 substrate/frame/revive/rpc/examples/js/abi/event.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/piggyBank.json rename substrate/frame/revive/rpc/examples/js/{types/ethers-contracts/factories/PiggyBank__factory.ts => abi/piggyBank.ts} (62%) delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/revert.json create mode 100644 substrate/frame/revive/rpc/examples/js/contracts/.solhint.json create mode 100644 substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol create mode 100644 substrate/frame/revive/rpc/examples/js/pvm/errorTester.polkavm create mode 100644 substrate/frame/revive/rpc/examples/js/src/balance.ts create mode 100644 substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts create mode 100644 substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/src/revert.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts delete mode 100644 substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts diff --git a/Cargo.lock b/Cargo.lock index a945d148e051..bc2ebb2a057d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14633,6 +14633,7 @@ dependencies = [ "assert_matches", "bitflags 1.3.2", "derive_more 0.99.17", + "env_logger 0.11.3", "environmental", "ethereum-types 0.15.1", "frame-benchmarking 28.0.0", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f20b6b1fece0..98d647d868db 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -124,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("westmint"), impl_name: alloc::borrow::Cow::Borrowed("westmint"), authoring_version: 1, - spec_version: 1_016_006, + spec_version: 1_016_008, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -2081,18 +2081,10 @@ impl_runtime_apis! { let account = <Runtime as pallet_revive::Config>::AddressMapper::to_account_id(&address); System::account_nonce(account) } - fn eth_transact( - from: H160, - dest: Option<H160>, - value: U256, - input: Vec<u8>, - gas_limit: Option<Weight>, - storage_deposit_limit: Option<Balance>, - ) -> pallet_revive::EthContractResult<Balance> + + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result<pallet_revive::EthTransactInfo<Balance>, pallet_revive::EthTransactError> { - use pallet_revive::AddressMapper; - let blockweights = <Runtime as frame_system::Config>::BlockWeights::get(); - let origin = <Runtime as pallet_revive::Config>::AddressMapper::to_account_id(&from); + let blockweights: BlockWeights = <Runtime as frame_system::Config>::BlockWeights::get(); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -2101,15 +2093,9 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - origin, - dest, - value, - input, - gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + tx, + blockweights.max_block, encoded_size, - pallet_revive::DebugInfo::UnsafeDebug, - pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -2127,7 +2113,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -2149,7 +2135,7 @@ impl_runtime_apis! { RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, diff --git a/prdoc/pr_6608.prdoc new file mode 100644 index 000000000000..b9cd7008de47 --- /dev/null +++ b/prdoc/pr_6608.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] eth-rpc fix geth diff' +doc: +- audience: Runtime Dev + description: |- + * Add a bunch of differential tests to ensure that responses from eth-rpc match those from `geth` + * The EVM RPC server will not fail gas estimation if no gas is specified; pallet-revive adds an extra `skip_transfer` boolean check to replicate this behavior in our pallet + * The `eth_transact` and `bare_eth_transact` APIs have been updated to use `GenericTransaction` directly, as this is what `eth_estimateGas` and `eth_call` use +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: asset-hub-westend-runtime + bump: minor diff --git a/substrate/bin/node/runtime/src/lib.rs index bff263548087..faffcd23fbcf 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -3218,18 +3218,9 @@ impl_runtime_apis!
{ System::account_nonce(account) } - fn eth_transact( - from: H160, - dest: Option<H160>, - value: U256, - input: Vec<u8>, - gas_limit: Option<Weight>, - storage_deposit_limit: Option<Balance>, - ) -> pallet_revive::EthContractResult<Balance> + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result<pallet_revive::EthTransactInfo<Balance>, pallet_revive::EthTransactError> { - use pallet_revive::AddressMapper; let blockweights: BlockWeights = <Runtime as frame_system::Config>::BlockWeights::get(); - let origin = <Runtime as pallet_revive::Config>::AddressMapper::to_account_id(&from); let encoded_size = |pallet_call| { let call = RuntimeCall::Revive(pallet_call); @@ -3238,15 +3229,9 @@ impl_runtime_apis! { }; Revive::bare_eth_transact( - origin, - dest, - value, - input, - gas_limit.unwrap_or(blockweights.max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + tx, + blockweights.max_block, encoded_size, - pallet_revive::DebugInfo::UnsafeDebug, - pallet_revive::CollectEvents::UnsafeCollect, ) } @@ -3263,7 +3248,7 @@ impl_runtime_apis! { dest, value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -3284,7 +3269,7 @@ impl_runtime_apis! { RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, diff --git a/substrate/frame/revive/Cargo.toml index 677ef0e1367f..098a66df8dee 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -65,6 +65,7 @@ pallet-revive-fixtures = { workspace = true, default-features = true } secp256k1 = { workspace = true, features = ["recovery"] } serde_json = { workspace = true } hex-literal = { workspace = true } +env_logger = { workspace = true } # Polkadot SDK Dependencies pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/revive/mock-network/src/tests.rs index bd05726a1a45..34f797c2b530 100644 --- a/substrate/frame/revive/mock-network/src/tests.rs +++ b/substrate/frame/revive/mock-network/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::traits::{fungibles::Mutate, Currency}; use frame_system::RawOrigin; use pallet_revive::{ test_utils::{self, builder::*}, - Code, + Code, DepositLimit, }; use pallet_revive_fixtures::compile_module; use pallet_revive_uapi::ReturnErrorCode; @@ -52,7 +52,7 @@ fn instantiate_test_contract(name: &str) -> Contract { RawOrigin::Signed(ALICE).into(), Code::Upload(wasm), ) - .storage_deposit_limit(1_000_000_000_000) + .storage_deposit_limit(DepositLimit::Balance(1_000_000_000_000)) .build_and_unwrap_contract() }); diff --git a/substrate/frame/revive/rpc/Cargo.toml index 9f89b74c668f..fe9cc82dd4d9 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -67,13 +67,13 @@ hex = { workspace = true } hex-literal = { workspace = true, optional = true } scale-info = { workspace = true } secp256k1 = { workspace = true, optional = true, features = ["recovery"] } -env_logger = { workspace = true } ethabi = { version = "18.0.0" } [features] example = ["hex-literal", "rlp", "secp256k1", "subxt-signer"] [dev-dependencies] +env_logger = { workspace = true } static_init = { workspace = true } hex-literal = { workspace =
true } pallet-revive-fixtures = { workspace = true } diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts new file mode 100644 index 000000000000..93daf34e02b6 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts @@ -0,0 +1,106 @@ +export const abi = [ + { + inputs: [ + { + internalType: 'string', + name: 'message', + type: 'string', + }, + ], + name: 'CustomError', + type: 'error', + }, + { + inputs: [ + { + internalType: 'bool', + name: 'newState', + type: 'bool', + }, + ], + name: 'setState', + outputs: [], + stateMutability: 'nonpayable', + type: 'function', + }, + { + inputs: [], + name: 'state', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool', + }, + ], + stateMutability: 'view', + type: 'function', + }, + { + inputs: [], + name: 'triggerAssertError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerCustomError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerDivisionByZero', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256', + }, + ], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerOutOfBoundsError', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256', + }, + ], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerRequireError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [], + name: 'triggerRevertError', + outputs: [], + stateMutability: 'pure', + type: 'function', + }, + { + inputs: [ + { + internalType: 'uint256', + name: 'value', + type: 'uint256', + }, + ], + name: 'valueMatch', + outputs: [], + stateMutability: 'payable', + type: 'function', + }, +] as const diff --git a/substrate/frame/revive/rpc/examples/js/abi/event.json b/substrate/frame/revive/rpc/examples/js/abi/event.json deleted file mode 100644 index d36089fbc84e..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/event.json +++ /dev/null @@ -1,34 +0,0 @@ -[ - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "sender", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "string", - "name": "message", - "type": "string" - } - ], - "name": "ExampleEvent", - "type": "event" - }, - { - "inputs": [], - "name": "triggerEvent", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/abi/event.ts b/substrate/frame/revive/rpc/examples/js/abi/event.ts new file mode 100644 index 000000000000..c389e2daf1da --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/event.ts @@ -0,0 +1,34 @@ +export const abi = [ + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'address', + name: 'sender', + type: 'address', + }, + { + indexed: false, + internalType: 'uint256', + name: 'value', + type: 'uint256', + }, + { + indexed: false, + internalType: 'string', + name: 'message', + type: 'string', + }, + ], + name: 'ExampleEvent', + type: 'event', + }, + { + inputs: [], + name: 'triggerEvent', + outputs: [], + stateMutability: 'nonpayable', + type: 'function', + }, +] as const diff --git a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json 
b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json deleted file mode 100644 index 2c2cfd5f7533..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.json +++ /dev/null @@ -1,65 +0,0 @@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "deposit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [], - "name": "getDeposit", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "owner", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "withdrawAmount", - "type": "uint256" - } - ], - "name": "withdraw", - "outputs": [ - { - "internalType": "uint256", - "name": "remainingBal", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts similarity index 62% rename from substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts rename to substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts index 0efea80ed2dc..3d44cd998ad1 100644 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/PiggyBank__factory.ts +++ b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts @@ -1,11 +1,4 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { PiggyBank, PiggyBankInterface } from '../PiggyBank' - -const _abi = [ +export const abi = [ { inputs: [], stateMutability: 'nonpayable', @@ -70,13 +63,3 @@ const _abi = [ type: 'function', }, ] as const - -export class PiggyBank__factory { - static readonly abi = _abi - static createInterface(): PiggyBankInterface { - return new Interface(_abi) as PiggyBankInterface - } - static connect(address: string, runner?: ContractRunner | null): PiggyBank { - return new Contract(address, _abi, runner) as unknown as PiggyBank - } -} diff --git a/substrate/frame/revive/rpc/examples/js/abi/revert.json b/substrate/frame/revive/rpc/examples/js/abi/revert.json deleted file mode 100644 index be2945fcc0a5..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/revert.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "doRevert", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb index 700dca51da2ad3f843e890258b59c16fd4df6457..0ff3d54157db21a636e30076634343a24c812dca 100755 GIT binary patch delta 9083 zcmeG?X;@UpvgZr~3@FH^4u~MAfFQ##AUh)niZIIJ!ib8>GK5h!83aV*0Ad7{1S~O% zpc2<;Hkah8i3@5pafwT!*EL=f;~HENm*9U39E zSNC*xaq0&_$6JCr$)KX9gr5`ge;?ae+b?_Z>T92NPgpE(-Sy$9H=3Pp4H{e(a=(({ zQgXf0SvzmMuD~#vs@!~{i`A-J!lein3{yTskEiJI7{uMNzJz6%Ziq+d%W3KAqS*y1 zMy<&&(O?~yA8RuV9yC`$A=O^+2i ztSt{fq9+QtBDO2(bcOo8ucp2h;h>P|0 zV-feqvM*v+#5RbX5&w#v5I%?4NsGj1`ie%x)R7i(AH;=-Y2r~o&crkgNS8l%(9P!` zr%sL^(LC3GH_>Wj?&=*KjV(s|e(lw|es;}r@y@5BfP?!TFPlEE{BZBowl{~b*)Z?+ z#gWlTM~i#?J;ZbHyc6;jvyM&vt?%Yi^KmI#bC!AuH%)7G5~O@~Ol2yJeRJ2_VaIp0 zx3s@EO`iRZ`ISJ6+)|%SHEk-lG>;hghp~r8M`YyDss0=9^l>#>leE_Vr}LJTq#}{oS6B{34hwAeKrN`>VG$@;W(?1<@-|imVWknU4uOJxCJ@qFF32%~GDIs)U=O0>Ch#1Qi7AAL z<$^d(Lh=bhhJ`R!eXQ2XZDMfb+ z<4UY$pjjD~{5d5m>;eV5%^;+Y+;|Tz=XeP36KX8NO>h)Pjq9Y=c2YlgQbFkRahzNw zQX@I)SSMwJJD9F+Vkh-pCv~@zlJ(%_$~vioNR80xg5IK|;ybBjozx|yqB%JyYo1E) zq&9X^cXgCg7;6I|E^=cPI_D$^cL_BWYvW%her4Di!v+gb8tHRC86e zNTqSqeWX%3N`d?58yvMADHTWc7BkFLjwaesEOnd#X5Enyi462a z#sOEF7vS2nK8UsC6SOjjLtS`j(a#tH+S$;C46YQ~ySYS|6rv8O9?iie!nk;7>*eaP zVNMQrOsO|0wxFKAe&X;+2uXN;MtA3!J8_y0QM zkAi1`1Hs?qN6D|ZFK9Y&>!i~c)!*3kxjVq;$&QrN z@rs?ygCA0#{cwZ3We+^jm`q`2PIC_j7o z*4XKn1CFjs+~zdSwsg&+I@fL!8w#&UZJY}Vd%SgfQ{auW|M+C8&0UZ5yPH(ILi2mt z;c8)-jvgI|X`|+o@VtC|MDjl)9qzaNPy?zv9+#_)Q|49{REC$| zYjCqqsQL%n^GHLf`-rG|Z|S!mw7r=b*Y@B>?{aukBG$$yFC1R*ebuV^A1#51#eX+WIbz=Ftq1UGV@cgNL$=#-( zjYo73Ef{{ctmbj))rHNEqsKS8+MWV$AF=jPWy`y*3#-o#-7vbrcZ>6k%-bLD_&Qqj zFyfQQJ%cjc&$jgRQSDUGiO}_VXo)C;!&lvx|1Ldif-L z82e>n>NlOEoZ?j-N_v|)2?edLBMdA|gCIRwu%EB_;SYg<+wl2~1cUY@flYwXVVN9_;ZElk?b=DE@R=$QAfbkPpac%4bO?9ss)?xklJ z?EUo@|2@AsIBuwnYxm2@J6@Z9`EPz3?=7kOE%)@zvJCs$zo|FG@2+XM;gy{GJo3ec zXKR;xGikqzGoV(AwoAU2^5}T(7m{l~mIZCKl+~ps)btLMwmvzSd2xGLqjR37x2oy# z(Q^;Bw$zxfxHkHz#rAjP=T82(ckRzlPc#P&?V=qXHanAWdcyBs(YYJN;rohW8+J6S zwMlJXZ|jla8h0UMLRMYT*MU8D%@8Esw)eZg_{hBLfz2yt%(<8PbWT~xjX533Z`VW} zf^c6kv_&gM;B7X}Q7^z@;!puk|?84HyNX2M65b#O3ct(`Jh2@P!CqnnlX?T7$ zvaacp-${#jHO%E8zl`MvqXi{bA(@B~%AnzzZx5gxDA*1cg|@hcOD)9ol1*tOq2GRI-EMeat<9 zja9NV2lk_stUt`eduVK zRv|t_s?U;{*q_Iji7T(7^4%l%@RskuPkAiKOj|Q{x zN;V$SFi(Kp;}gvIO)(KJdMmX_{L~>9dS?2L6!)AksBE~`>W{`8$r{t^%`2In8$Ucd zcl5ofTRva%sYmc*N$QXHUnIQSx+#0_r5EvU*{jC7Ikit;{aJnA=to^#e@xeQ^Y$ek z6CU)^f{>o%7;jrgt}=^3J6?=fySEHrl{#=HIWc 
z|I#5jf0bwdpB_w3*x#k$v0WS9_VuA#->Ih!opq$tD^hTzE@97@ZQl(IT%^iQTVmIE zrD*-ZfYTbSd}~p*OOo_Uw*HVt{>}=uzma^oammbyUX3gsYIu8$>)Nne;*!u?Ek(o3 zw^6dJ4 zMbE6eW$U{rJicqY?;RRiF>P$vW>e*tH{$QyD)SYdaXtR!-6d|?Cb`>b=TOVgi&@v&D ztDKxH5?#Pw6udK14EwPeLo&0mzM_5OKJL;#N z&~^4ORV0$&xdiX5<*33qmK6ePb&;RVutdI)3Zy$A}m+Mt0!zBK`UyP@D7}wMRCl0n@#rS%)BprBUN{1I1$hT~ z33*Ep0vR{}VF&_w26>1Kfha3`7MGGjsDq@M-slmO&P{2Bhmo_|9{j-Leoz-g|dmXAO1 z&%MZ$No2kf|3E&Hz`xrHSSj{L=HX)v{L8L@4U+fnGYn(}lLvPLW)Jzeqnj+KX_3TV97J3_N_I=tS|pL6&Dz4nckou}vV0UrK2T|gXP z3A*X)Dj}$>ayVlU*UmdDem2MboS$-zk0o^^|$9pl++EzQ(L!kR%`QwaRv7VeH z!z@J!vRt<2qnFQnY*pK_Q_wmRTtQ99!JVc2^bosoDOVnw^8LuV`hu1V6`<4U{e%|D ztG<97&KYQVyy^?c;U?mB`yl5}?8CRlhsDzQD1?Be`KE=>N6z>-1*e=Z@m@6@Z#mL| z503HS3ho;jP2LeB$g30!P8mUUrA%dE@oJMF#ntMo!neYX>GmUIXVHS z*Axci^Tm#@Pq{CyWm=WvO`veTr(l{1tezhd@IRRsxhYsyiS7Ti3uvvn7r@7j_&AFl z8-_bQZs6q!h`AZsHB0kV&^Z~p0`3<$vCJe9{IEdg$VdA$H6uHAZWu6EXIWac*F_+G zOB}!l(Z2tFfAdz^-e4iFP;3CVXFklP-W|5t-z4{@fn#S5^YBwXFxUE1{bt+VG0zMF zeCRG<)QQhpmhLPwaN^A2#9LxVKGyf%`}3yX7Y#gX5GXKTx=ZGR5e5N1AbELy)(GF_ms$;+o9588Q0&M@ zFw=YeZq~!u?x{h*vKySnw)v3fS5M2G=dK?Q1_3@Oy1I{a&$o9nCK@=3Zs1fc4&cM6 z7QLhItp2F@s6l`av~C@tPAwej-elm^c7vR1u_GUuJ=0><_pryE1cN}K4Xmw}Ir8zx z&+U5Iq`tGVQpoyCWZXJ!w1NBBIUlf0YP^3xV(pm@g8&!PL`O0yI-8mE&94Fj=T{pD zs}Vc$LC~Owa}q4A7M?K(_}W52jW~dhn*KIrW?cBIWpWLaJ%8d z@O&bcESS4!7#vt6b(o9CELzD`K|@ZkrIsBJ^&&Sb z!F;isZ3O-2fyO6&Rvt%W6sim0QJtd?oh5v{IpnQBL`aI>D2vc!%qmp*q^k@3WL@ZB zIJwHfqC}ORHmfK*&$~c2Bmk_J_v!yi4L0{m*;`7xG*o=(0!htcyueq~xn9J#Em@R5gwYry(!Qy zzZ3=XOL26#V^SmBze}ic)kQ_BLcfT#%)+#+tl2)QJdJl=eqnA}j=GH0ha=6SoL>>e zW?m_K4_Ugb|BlL)<5>81Spb}G{GfZfI?snc62i7M&QP_)&WMhKX)9fzc~u{{y($Wx zHTT&urD-&~BW(2?8#uimALh@E{S?4O|C9$s(jW_4`TvvD%UhyyJQ*qP) delta 15944 zcmc(G2VB$1vv?8`AyT9lr6W~BuhN?!qF8_kNC^-i5Nc>P0-gmGMO_hy-@E&N-dmWR-I?9lnc4C!nQz}*TFVpK5-ao9I@cc* zPpLoS7!^}Itw%0A{M`#~XU}Nfum`0IS*E{kYi^(rsc>r%>ZLVc$ul(Twb1lmzm7xrKIK+=5n&Lr&1_cnTcF1 zFIm76a*OhEIk!N4Y{5myu;4U7R%SAf&rRYcRh_jT+TdXu!K=7iE!44J z){F5x!0J#3+bF7NCUuzbm5Qjv6TsNwOipGNFNs3o3KH^p{3ObDXs!YD)qru&Sz^o= z;|MYK0jvgm7GS)lw8WSp#&3X$>1|?sPK1Red&PumFO372$Ch%X<4B2qN8v*MB zE)~;b0UJTx9k3x_9l+4Hqz|NG{0K0RN{)%^n*rm2762XwcqU*x2TQL z$YV23`u&Z@C6*(EbGQjUuZokunT%lEkF}Y&Ys7&?KQj}U7QXr$>7dJE6Nf zJF>Z(vF<^a*||kC6f|fzir5pbkBxbI?y=IE4h_wR*HhFsTORLxa^?ETb$2eC&M5eD zw!yb@brEN9%*^XU3@2!fUwjBYjo?4s3P?eM!x+0~CUQ2l^Rt&!HET2(i-FNZ^dl{wb$!;5Z2^BqJ z1Tmdp8;yjil|ec3&a`?8>d^9(e+fiiAW8$KAY0iHNPm!wyek}aEBdxm1KAVU6!d10 zGcyEg?ogv6Tc$H}CDcIA5(e5S@65aiHGin}i)Jd)$<}X}&hjBpv%`EChz>nOYSsZ2 zs|nhINjnFL)cgWef01Mu)eFvcY@igWG_w#WSSu;CQ^8sOIMlFFSO&A-Eec=}))iAl zhUv>QWq=qAL{M45nb`ofAgF;kvd+w2sCh$8G|VGKrVRKQOOOKhIWzO1hOMO&T2#pp zY-|u1Bje1x4{WTmG~o<=8Ij7O#xP6F$w0v>i^f_BHLNlXJ5ug8)JRp4jkG167^W^9 z)(+5wikA8lQ>8Q!D1-)J5uCylHPFNqJAERds6 zB32)%CZiWqP?xkD!v-!C4!9K~2}(AICGEys4kg@PnvnApYTjs@jGK}X9AsWVhnc6~ zB_bNRN|h-C4BTPBm1}?^a=h&jXZedzvjjfwB%Wv36un|`mHu_E5n0H2(o~dC8lV6r zR12s?33UKEse~-#J!uSOlm^IK8Px)su8cZlJmt^8RU-gY1YIcody%H0f;!|qnF6>Z zi5$37)|pnPg3=T`X`fY4Eg&~l)B$LYDzX>?L#U#(A)bs=>J-X2lrhAOp#qoB5TX>< zU#jabed;gyX~L=}+AV@o5K+3(U((ScY25zOw*JxwqQp{jrw=8Y&FL>)g3>6FDsVj# zl_L8~8~aPopyW@qGuI|d8U3Z5{iP3Ni58@T($qW|*>D+*Mj2{uj0;eTMBCKe>G~u| z2&G8`^D2~Lh?2Ddg~B6BtD(dpN}r)5;zk%!C@BQ(2$Z-)$lov9BgAz{ zj1^#s`Bq{+#<;z;7{h@>BpVoCT=*--UV)Q~NRk)}at6Qyxf0wTW6XCG^D)N#+ySr$ z;LsqFmw0@FfE#!NVC(~clO)DRBpiwae>7%7`k!FjA1+&j;uCRaNJ6rJF&=mlrb3d$ zm>xz@MVJBQNB}qoDO@ofV{BQfxc(yydz*APiG^h0{|JNs$tn0xFy0AE#TNd5!FbSr zDhTU=7b%8+x_yOsTm6guORSlHv3*5O|L6M`w1+gXp9kvy+4dDr`j7VUf3$x^m#rS`B|NnLSf*SvH`~BN~6iVNaK6FYy)%(mB z#e}Bp_uJn#7uI)=bTM~)|C`|(*S>23d^6psr=#?w-*FB;8S+b4^h)>2c)P{5+ntXe zoV0$1=95&nkCRNFK5tmdeTaDD?Z(Afbj*yE 
zyO!MgOSwkq2e@=lv7LbNRf9})y&aLq+@0$u+?&g*tjsef$EuYwKml6plV$xT7o)T>|Ygc3M6-BRlmcHtU7b zsP>VK>tbU3f9_y6zug_GZ5Cwg`jOon6%*Ea-RGlAvA}ZE)|P<0JMWg8ubgsXERW@1 zog|?h-dtp&Hxx$({odE*zin5P@xJ)SuS^fjY}W9c@V4sfwJnzA2THb0J9MFQ<94l# zh-rS?rl_}aJGpiyZ1=D0=mF|SRXkuhVhz_6E~yzyG3n7gU~Z(a)#JHTaEkA^uIjvFFVE#(%kf>a zVn@>|-N|2M&VB24d$eZ9vtM6C@NG|oO3inhw^rfE@}<8tNN9&IK4hX7d>UVJ<&sIo zVd?Zz-v#TlVq(V|I2tn^H`z2Z*Tx@Ns~i4cQv9hGT6Lj24g~%hvtKy6>qw5%^2QIJ z;^#S?9@Jxq+CV$S4XTb6*Kf8}{J|T!sg`wM<}5+p?;Dk~8!pMW)~R&ePTw<=yEnvd z<4;j5OibLz)(sBe<@|B0UB_mKG-HcP%u@;N;GwEN(H-}POm|NcKG0rw>!xMXjXv`| z6Piob?|W}Jm(%RB;a#`1>HLenug+*4vhq;(SuL|{-&tmA%G3>Ix+P&pH)rm0Y(k5S zHHCJ!n}m0G!*AJWER0~cT;Ar;r7>A+;ft^8HM!KgR@-W(sBy+mj%jkD%nR21joDnL zs@(D6Q039FIl2C-PgX5EAfX++UhPlxBYRTobps|acC6SoZPH0$?VveECi}MXUn}fR zX0Barv9E2k-=LR^BDG|al%{@GiWs(dN$HC9-N*cj{YL-pSo=0j9eJ2&3Vj_DkKH)C zf04UQS;@vExA8<|aEY=8@}C5T+ux5Aa(Vn%x<5YZ?6R>6Hymv7w(~DRHGGe{qdXf`6u$q zGv+rRdYV>z)K+=L;wzuN?b_ir{ek)!l^XZ@nQA%#tMs?Ij=$tO%UdhCc-e*<(L>RCQ%zw{%GGH_AtyL@Y#jrO^gYbXYZT|ciyCfO z{;*W%X!MwSoj21net(&)L|TouLYZ{ueG>dZHRZg9?!ediV@=Z2- z=OQNh2HK4qvr}pgZRFQXx$TR%bB?D)+pD{%HM~w(CnYxD4EtoG-xDsi{jg_`(vt4JE1ACs z?HM=S(qr&)htA23t0lC97X$r?Uc02^h}O(q_ZI7ZTCl75%=+UKn)>o?ZW@|he`R@3 zpS9eSPhA%GXm7QeOCB~obogrj;_jNogO0~2Sm;kln%Ud=IuONJXbM%#nW66;EgWv< z-sP2)=+89TvuATAH|TN6`48Q}=wPDpOX;5bzRD|}xz((&ug*RdLcJzuB|oyXIYrG` z`E!Vegm&x7gS(D>cn*!ln4b@%uvMCum3BpPM}-XPVRf=$Pd+ ztqpmbt{<@NEqOWT+B5a_`|dHkEz4q)mMbeG`4O7J0418|*)I!v-ej#A%(=kOvEZrR zc~#L@;2PFy-em8!#x#x*@X#^2a-@9ri_@}?mMW|||83li$M5tXo(^PI8=FgLr`N9= zoG5v(n(oW4Y}?l0xG&J~sn%v^{qppMlfIN%e_5Nebf#;;iJ#9reAHv76|uob7`SoF zh1t~$Ts>u6kt0eA#%@39X&H8UsLqt|MWv zzkOxZmXr5-^nd;|N!UfDR~Ys@(^I=Wy0RuVTzXoSOkVvl7Sds93aJsV+k6YA-|XmI zc$H`Ey*BXa^htN$t~_1i@T#)#uA!0nh9klgV0JX70i7GC zHTu^sO&jD@TD4uioX@-IcH`@a=%UYW%7>s!pq=T#X%AErXzq>{zKAm!{9A~y>N#UYD|J~Pi|%iFJ0zI{0_nm*KvW^7=< zoHNJOOF}UD_6zGxM_*3!ioFsd_D8#y4}O2ewLn*YdWTD0%2ticOSZ!H%gePc$__U> z8+m)j>~)&U?C+lrp2a9S&mN)Pm_M)5VDN@x5`xLcZ;{}(vTsM+>&mUw&uGXT>L@;T zeEY~#J!Y$R%9cfZ@PGBtDO$Luc3t;`E78Sj>n^;B3>5YnNkn$rpxL zI6Z#JU73~s>$KKo$rl!X^0vQXIymYa6#Jr~Zs~ z%*H!6&n?u|xV(bVs=A-s_+B+ZK}PoOZ-2CXjUUIUqWMknY^}e(OhPXC<^#*72ke^K z=Q=0*ny)b1aq7>v&^qOFU#Htg#ti8xxZlg}U2olK;iobNUl+T^-8EmL!&)-klV84h$)!Wv=ROgt{>lhg z+oB@q);U=ozV2Z|($g*gHv1IA6(_XcSAwV@dU$%#?8Hn^V8M`Z&$FnQsjQM zq_$K-uDyg@A^l~f;J}^>Ng3zv?6?%BZ(J0VczJv3Y3kclV~=xlda@5$S4Ye<+41Uy z+S$$zx4h~EU3xi`&cO9X&3mT@v?WcFyiiH=FKFHtsoCJAK8DQj_7I=%v03AJkNI_8vB3o^tx=7rx9idqLJO@e&$4 zqDo^n)d{_U+!>8FVN+dDHRP`7Gvp&tjVYVzhNxz2sym8=+ygPq*+|n#9%Y!1p?aYM z=4`4rQnFxEeGm_FUvvy|KcqE+P4!1PkO!dl;q1s!{p!P1YCQgBd5;l;$LEC?e=NQ0 zsXNyA*0g%sq9=EZwceV>g}KcO2`!!2^6)CR?a}t-ucy|$3hIdn&>QruLRV|bt(T1w z`Uj$OFh&qEuw+w5qp6SwqgKerAafR*8iGn8AB*llJ`UMhv8kbGKIG%kW5_2US8FzP zB3cUhB-9Rh81k`UQ^QduCZin4Q_wlcQ;~r)o618| zAx}fCkf$Sa7dDlTN+HiccOcJ1wytby7Mc%vHhK(s4sspIrskrhkPA>d*pH*TTmDMJ;0t)wgB*Q@XVUxKe>IGP46@i)T^r0*@GWdRPwMEtf9A8|N}MF?0O zJ@%fod%T}F6~%iN;pXyW6ptzh8$7_qlK5W!40#YkkfFm_bj4eaCZ~cPcqgbALI?;J zi-#cNSPrNHD}t2oPN-w)C&O@syT>77*$&j*E&-zyiAF!l;P)e?!SMwk6{kW}7VeJU z>h=90N|=}l0LI`@7b4+0eo2K@!M`G5UXHkq>EwOCKup7Lr}f3~TRHI0Q7iD@>N2}yM-N{jGR!@pug0Yn2#0l>dm5SOvh*eL7(Y&bRwI|Vxs z-@;)BVk5E9*pb+Y*qOKub}$A!GkAuu^N1fQ@f6|da|0L&fGu?ez+T6*gJ&QBz#jlF zSswsz051SM-Qqp@hcA>dAA3m(0DF@PfW5;77z;2O052J`&mV{tsUZvj${2uPfFyuK zfCK;zKs-PkKrBEEz!ZRJfGB`SfXM)CfCzwafG~hb0C;gu02mJt3NQ|!0057k50D2S z0LaCL4;5V{NQla5FNFlZ+fA zMB74BNAem$#+Cuc7DU5F5rPTWax$C_ICfTcSQ;7sN(S5!970h9edM8!I3!QV02bC1 zY`_VeSwe*ELDZfi)zbr;2l9TvvA2S56f$m=43q+6#7@{-DphoB@D_cVx&i86*iDY$_bM5WFlTqL;~dNkSaqZZeLU44fo7 zxnjM^0Aw|E7 
zivf-!xL(0k5iXu3WaQ~UCo)u=j7_CcVHD!P_li{rd#};o_G8~-@fCUcdhL_f#`_M&46I@!!5Os)&9YF-eLMME}!y+c5 z&Z~7k25&ezD;PL1j1^py3lxw|tR_vUfP!P~XcY>mI5vcKUIAT<)zl_~&dE4hVowk* zTCcb)PE(rBGt*Nh?tr`Ul0|ku`ZycwsVNG|pcU5cO zKm+pAgF$&$llYh4(FQnuI%r0crZ)Mlg3Igap$dxgS_c}q>!JpbO@7?a8(UfIuFe$TQg&6(`#b ztpW<(N3&fVFH(yX`fL`8n(S{t=aWsG;KIulBK7DTyb@ zvrgpmFj%Dt5S2eHbf7)Uy1&fA``*qfFJ&4bPhd^nPUZ<%iCGyLoXjK^pO?vHadJ}x zK;sGo+{`>4hc7_<(nP24DApiXvIPs@Y}#Yl3A{`eDOjuyddF8~!D$PB(4Y{$N8++F zIJxObS%sN|_TQ7ly|_P8OEm6Zv<2z-E)=Byh4mdFU%*M>3i5J!nJKK~tlSJv9(I?( zcN7@nyE@3WK|5y{ppASPFTy{B6lnHW(JXw!OSplPo6DKXO5h}>=W!DFTo#`*H7lPO zZKNat+atke!67M{qQb1)Bz#Rx6C`oD+4w1ho0)`_)sd(}Z;2WUZb7l4)&qT^!9azD zueyYja=A(QiClb-m;_TYh$SRxUB^{H$D9`+cC*tRps7gS=^k7G#kHcX^m+BcUmXEU zKkZ*UeIlKbd9bf@x!HV9B3E>4VU-BN_=zYhRoC=83P*qk{4)t=&>lNLPf`-JldO_X z`rB0i4+dDq57rr8AHY%ietp3GJzh2c&xvyutVn&kq_)-%+;Dazxf!Nv(c=9wv4hD{>09wEs0Xth_VxOlw zx!`vTz{Lp8Rk)4E(ldCOJUFisQv z63Z4Fq$42+8cEcMT?12M)juZi`^zxZ_}&>CoC`||j;3tv(k$W}@&6MII{t}5EMi+? zwvZ1zE%g zmK8A(D&(UZ!4-WVi^lLwl!=dIfhc;YA%!X6=OUnD1T@qz!_a|zK?G#7NL&gD`%L14Vpt(@@^lg|(UF+%`;z^BPEp+34&I!KGWW&@8bLJMcnd9!<2S{M~+ z^Xrjgnl`e`7^=oe%H<>{PqpS|3Rs!=O3LR=Q6GH?R}1NGm`fYEUM-rbccR7Mj#SN|7sAwr`7 diff --git a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json new file mode 100644 index 000000000000..ce2220e0b756 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json @@ -0,0 +1,3 @@ +{ + "extends": "solhint:recommended" +} diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol new file mode 100644 index 000000000000..f1fdd219624a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract ErrorTester { + bool public state; + + // Payable function that can be used to test insufficient funds errors + function valueMatch(uint256 value) public payable { + require(msg.value == value , "msg.value does not match value"); + } + + function setState(bool newState) public { + state = newState; + } + + // Trigger a require statement failure with a custom error message + function triggerRequireError() public pure { + require(false, "This is a require error"); + } + + // Trigger an assert statement failure + function triggerAssertError() public pure { + assert(false); + } + + // Trigger a revert statement with a custom error message + function triggerRevertError() public pure { + revert("This is a revert error"); + } + + // Trigger a division by zero error + function triggerDivisionByZero() public pure returns (uint256) { + uint256 a = 1; + uint256 b = 0; + return a / b; + } + + // Trigger an out-of-bounds array access + function triggerOutOfBoundsError() public pure returns (uint256) { + uint256[] memory arr = new uint256[](1); + return arr[2]; + } + + // Trigger a custom error + error CustomError(string message); + + function triggerCustomError() public pure { + revert CustomError("This is a custom error"); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol index 1906c4658889..0c8a4d26f4dc 100644 --- a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol +++ b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.0; contract PiggyBank { - uint private balance; + uint256 private balance; 
address public owner; constructor() { @@ -11,16 +11,16 @@ contract PiggyBank { balance = 0; } - function deposit() public payable returns (uint) { + function deposit() public payable returns (uint256) { balance += msg.value; return balance; } - function getDeposit() public view returns (uint) { + function getDeposit() public view returns (uint256) { return balance; } - function withdraw(uint withdrawAmount) public returns (uint remainingBal) { + function withdraw(uint256 withdrawAmount) public returns (uint256 remainingBal) { require(msg.sender == owner); balance -= withdrawAmount; (bool success, ) = payable(msg.sender).call{value: withdrawAmount}(""); diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json index 3ae1f0fbd799..6d8d00fd4214 100644 --- a/substrate/frame/revive/rpc/examples/js/package.json +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -1,22 +1,23 @@ { - "name": "demo", - "private": true, - "version": "0.0.0", - "type": "module", - "scripts": { - "dev": "vite", - "build": "tsc && vite build", - "preview": "vite preview", - "generate-types": "typechain --target=ethers-v6 'abi/*.json'" - }, - "dependencies": { - "@typechain/ethers-v6": "^0.5.1", - "ethers": "^6.13.4", - "solc": "^0.8.28", - "typechain": "^8.3.2" - }, - "devDependencies": { - "typescript": "^5.5.3", - "vite": "^5.4.8" - } + "name": "demo", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "ethers": "^6.13.4", + "solc": "^0.8.28", + "viem": "^2.21.47", + "@parity/revive": "^0.0.5" + }, + "devDependencies": { + "prettier": "^3.3.3", + "@types/bun": "^1.1.13", + "typescript": "^5.5.3", + "vite": "^5.4.8" + } } diff --git a/substrate/frame/revive/rpc/examples/js/pvm/errorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/errorTester.polkavm new file mode 100644 index 0000000000000000000000000000000000000000..aebe24c4c0f597fb3d0171a9f527bc86d2d77b93 GIT binary patch literal 12890 zcmds73v?URnVvg$tb0cq$+9w*M$$m!Fl3xG(7>8vsJkmtQ>0*Wyw+^0a=c3tVr(ZN zq_$(rk3(r<;z!cfh?F=Xv?&QmoP;gqWE%o$PulL1@Yu8MS^5GBp}b$G910X@+U$2n zvh!##ER8g&lh0EKoX=-b0T3k{l?_RH5y>QvG-J3m&n^r8luG{my2mVRky}eo5E!`#k zm$Zb8$@j~@mEV#LT2^v)NvUTEyPj|4>(raQ8+^BETm2oShlBIVE)D&2s4H}H=)0kZ zLeGcjl&Mp`QZ9wh46lro8STa+h8F#7v^lya`b5khzbKxKf7iVHj62T=oO$rfOQ+_h z4o$sh>N8Vm;>wD*Dzek|PgBlHoOR(@4QKt)+JE*1(|>A5DsQWNu9D0+Z^ptI-7^Mf z+&1II8T~V#oVoc^cYLa$JJ1tUSgH3C#VbkV)590>AubP04Rsy)?1kv?Dr}16<&q@N z$mwB*?}nVhf|AS!066|@iX@jEp+S?D8#JuT<%v+)kx(a5n8H0l`L;0QlI$mvBHo}4 z#ynC+BGc(Cg-)lH;$ewLLOe|IFvLSwXqW{R$~?>yR8&vUbGa;+9i+h;T3$`VRdRU} zR2sqxODa6!3G&YzV=OEy9JHp>#gdH8zF|6DApUCNBP}Lb6(p<4kWPk8vQ{JO6f$6tL9X5F z)gDl_F;yF2+F4vYH?UH6J=R~Cn)lW}GuBH;vq3uQ$%-WD&62(p>2XL~9cix(RPxZE zbW94&Ftkw~nt`9{My^gXw7iNA+F}N(ix?NV7#9^W{?5>DR<$7-*jWURx!|!Pc$XpO zG$aQO7r_s>;0KD}hYd07AuVuE5xn08?=OPyGqnAJd_d))jpz&bL`TRc!1r-gGPJvV z=m=cSL-z^rTmyUvU0wv&8roi;U<_t(m}402e3dLQ#lNGxlrJYO;9O&1s5thpm}+Q1 zyxhgrL8HJ!?1&H`t?^Hc)p)rk$&C9HHd~?dXc;L01OEWvR54t$$#Wbcr_Phh3X&H* z>rbUjkzA2b7*SM33IQmWZTT||HWFH%B{b|1xqJbED2?xy{_4@fTM$04kU$z~^m-tZ zUJcUeRUo!r31aK@LSnr_cI~w7bJ9NhLVL`)P}P#m=~bzb^>WCteaK$q9Fj?-G2W$2 z?IrdAHR_xJ9}U-Q_cN!>r+uE$n4@8N1I+36DNr{^=LeEsS9qE4r-~v&uP|4M26DyC z>@_s(Yq#JiWU0%5NNNKrjit^_zkBm-KRS<`o4)hKTa>R*mi||KZ}G77aQqhQc3zr} z8vmHNLGf4A5PwBAS{2%Cv{`7CXck(6$dLpQY7v@Eh>;*9T17~#hLHFyLQIR0a5W*3 zN^)E|nweHPp&Y9MB$6dfqoeJsV^4g=;TF%$_oCtO;3ihr%rd=P-f;|4VC7hRC(-*k&#A!Aba8Di 
zi+2&dUFD0fQEF0gIn7WHNw%5+v&6!0^YgL^MUhOAL z^QjOBC+98o-pvZDkG=H{W7?mNuU=@%zH8qxy1TL1Y4W%CER(TVf5m3WUoncd9&H3| z9oisTKiUnF9JxVq#bQJv#tjmQu9rw`vqa+SBx3eUBs?mS$e{EdV(}kfSC(wi$quqx zBV!8L!(G-kg0;Mx%l*g9_1s#oW(HLG&@sM&TabX9ObsV~i0HkHx5?(+%(|OqIw{#5 zG}md`A(rW;^1frdonqJH`-wiH@@~!A?8$U_$PR;ynVQcRzl-S1$G`8fzVEXb)4Lhp z$3M5$ZgtkG_H6s1%t4>FM(C$~8HqN=HzuZTB+g|jw#?pU+Zl^#Gri6ZRlAxAa|ccC zRZG>M!&nIeX5RWXW8eGZNvvh*KSB-9QA*SMSL}YL3@Y_;s@s6~lc8?su-p9wo}4-G z2gYuD_e7p3QljJqc=E)viI0~X-&8*-+*n;&;KsD>w;21zJ126Z%vH9JQz&+E63&ul z(xQ=8g)HH6?@2efb-V_5a}%9vp>neH-l!wAavHL3;u18MGrX zx?Gwa>g`t8z%v|(3W4Sj^;sS}(k~W33>!d>?D@@XpAbp&48YCAC?BJnZ5sU9I zw0$hJyBLcj4?rNkgCUL$4De8wz(ByggN23##x6JD-YL8mjbsDOMT`~^goj!L#voU( zF|?Kfh7&kc#5n9?94=xYop3!LjnoHN5rexJyoiC6;fBH(b%D!@7%3McRm4En&|ko) z4P02nsCF@`ix^0GRu?dm0lSD%>0(qCF_1Q`@@So&4{A3eh*=H9tcYLDW=3KNUE@jf z9zlc{wdN>CNN0}0(^nIuE+mSORzMK-#_NUoizD_m9TTaPK+2jsIf*p_pn;=Edj=R| zrJnKJV%ANsF}CCPB$6U+^`t#XIOf$cnW4b~ArBG;;hwu>cQ!&PkH z60f$&b4t2&^6nu~G#Wojsy8GBQpMyU?Ph{$K_1Bic4a*hMP>3V)H_)k+wc$)O>GaS z!K|i;6c9m_NH1Fp>EJhCr?Dh8>u9)^>h(145F_=z>7fT{e?zk;c<$$Z$5`fdyn>9t zZK1pZ=gq^Mx~$gpLB1LTNJAWdtA?ZR0Pa4oU+E;FfiiqX0O$QG_8)? z@OD~enwGgngRUVQxBAeZ<+fsVcrw#G{A$UX%}6MTV3P*r$O0NtJi*zHL1K$3GN9-J z5^N#yWyGjAhnY1j)95Tr4tWSQsxvv67d|lU=xaPyAIDSQ%k@6QfR4UGq$ng_1@}zivvs&(+lQxaGsrVBYxDy?Vpi)9`5?J4 z5yZ&CZHs*>RyAIFO$e_sOV4>4T0Ft?55CISFJ41-O^nKh)``@+YA5=C_}WR9t6Uij3p|H@H2h>oV(3pq0`UOK=OB^*WMokgp zVgo@FEC!8c1KmZ89=8b6BQSPx8cC{s6G%>ACJ*frgl37p&@6#;KPNXRG*qYB29jUb zz#lignt{wGq!q}KVNF%ta!&!UNsJ^!-PQX# z?INM{kSDtf9?4%}BKI-SbhIFvhQ`qpGzo3;(8pi#D}+Fgqx}@^5wyc-52O7%+CdaC zWkW*g=a?KroD@HZs^vTqLQFK}1;j~+k79_K;)t0{#7u6SbW{=rEv!RZ>w7;ZLL&zx z8oo!O=3a@$?viM9k3^^JlxS$1MB`f|8I=*2_7+L;B|ut1d{Zb2)~mI*q?{SJ}zkm3Pw0rfklt**28*$XnwlJ5m!ynC_1S{V~lSqkzQ@eXjwWsd!Jp`9i}x&xxgLFF_goVbdQr{9)Z6 z4*J8IKg|7M#qXJy>isfw1TT1k&Vw%cZ(j6=6JSv2w67@K5<}1bmW%tn7mxc9%XO0l zJ&bl(ew9ld0-9sQSwUEC+1KmCp_xYGq6eBRyB|8122w{1ChKlh<6=c4&f ziydlx?gYyzV*39Z%lZ1VlUdHi_x@JQ?LVJA#jbsL%ee|e!8T7=&TFzRg{7xbhHPA_);3`}+nP#tzwZIj`$u;7AVNEUb{E=S zw4G>s(6*s%aU-{4^n@S|!4$$LUl_4mF@RcfsXOEBGw^bM)fme2%=^bsurer?c!J9g z9uaK&@26pt?3_pwJ3TnRJe>o!2;o??-6pj_$Gtd*APp9mH%Q5>8QIPoCHdWGZKeVaJ?S4WC#SfqQe=${; zJvEuB>M3>SHtngunyLEbk&{hT-<`h^t3QbDzfzF5(xotAs;;kd30x%zoM@^}4Dvdg z+ByoBs!SeSNOR&EN20Yo;b%@uYrDc1W6qugjw{`v^dqinEG9QMPDlC{8>*@$XR4c~_f zlS;$0NF>QOc&tt>vr)_3haywGTVw)GoqJ5oGWQ{2c6g3jt9>NY$nTab8}W2Us+)p_uiLy2a8_F#yWC8%3c{%gK|5e~8|ii*L8MZ?_cRE)<0k zy^WU?cDmr5MKH>isDTc!056smy`m%J6<`#dQ4ZcBs-%Ir#nJ|9D7XYNw+Kenahs@+ zAWNcSjdW+0?r`Y#dJ>*R_teqbQ*qjmOZW&?`4j=cj_ zOM7!>gQjg_{;8DqAJ_bG?vE=t%C}qM_7S9B;))0Aj1Z71j2{3S%VP^;eRpIlfxdSuFe}(V9Y!pmqT;p^64FI#IuhW^oCE44GuuAZvB9E=Vzv z9)tGkv@b}8G&0P|T7?W!%%g|XUWMkyIk!&w*{=odryqyEBOPRgLOMC-D{gY+f}~H! 
zeGVb3qKFkhSqf(iGmHBPS-pMy6k@!#k17~2E-qrAw!gK2k)4po2VIN@ix@a#K-H{= zPI`=y;}gQZ$$fiM@ofk@viNR!LYkMj;3Y*cq#0HA9?!(-1;iDz^A^DcaqT&t=+wFO zvy(48?PGSYtvS7#z0{uREcI$De9i%{z1FUAaA7Q-wfDNW2dKC`uvf)VhO+PM0U|?ehW_LYG|_p z+ew>I8mG-1+E1H>^`y-~SaaH}L#HV2*hL!Mi(`p~4W7h$Dd7%u{-cVDOuwN;*mpPQ zPuwDvxS)ub+G)=X{QQKZKQSu({1<{V&;Q!R7m5G*r+uxE_$M|=cewcbetq15ScGaag_>OD6ge)6Yc{AAR4WrdZm{3X-idV zCTL9P1ueI@xZkNvZ1!KDqHzNe35uJZ=lDbZ`%HBlFXUDTg zPY=Ir)&Mhbjm2w=eMP|z@g)I|P+a|60IgZEBsq>X3{&Hb<&-9lY{&L-4 z9`u)M{&Maw$EMGYcO7&!0`ebp6+t6W#TkWaaAytkmz1&xP`;flH;Jl>7<#*8W9Yro z#Lz37FmM9bcrp0Gj;mWh;JS{)wII^eo>&e-O^Lo2*sz4=^u(E@7`#H@-B z;PRF?Gw3A|gYUrYGPr7!@58~6Cv%@CvkS7BK-_P20-=)c!1+;0W>*P`SXh*`5}mVn z51zD*cEgr(iW^>?L1AuJ8nnkG!6YfK!F@TotagfoEXR}BDeZj(5s4?UOWOU2bVt9X z&b0J~)Y<9$aPT|fIlQ8Ogs1cCQSlSM`z-)At+Ysjk2Tj@EWyrrg7`F^YLyFyLp zNBXUFY7T$}kRA$cnv-`vwK&yUTWhJ&Ibxu>XQy{sGX#b^Zm`0#D$=2#_>|%EwZXx` zRNlJDMFG~tU-`6cRh)CcCc4CrhVw6@Jpo#t!$ z$ukvOpLZWZazIqjPnU~d?oJBc;*$uuTuuo_xSyLg70|I%3?SC)O*dTO&P)vSUZRO% z@h1cn61Q#r`usN0Beot5l)vi2#XqUMQb2CVZv$t!vPyh`4=EW#+-O+^VkdE0Ol4dF z1Oc%n7^LAQF|QEVihuR|NELSI_!M2?KV<_1+}#GOEIrcDFgGvuqoLSNn3|wc)Q5B) zR4z#bt#KCNdsV9TESDJGObNyoR*tb}s5j~OKc7(3i8NN&zYQxsMT$WYOoKT?}1i8fRf-*mjwgy>*{?hrx+#BHgDpx~@{`_lZX)SN_+EAl1o P|DsnIq4bp_3F&_TlmMih literal 0 HcmV?d00001 diff --git a/substrate/frame/revive/rpc/examples/js/src/balance.ts b/substrate/frame/revive/rpc/examples/js/src/balance.ts new file mode 100644 index 000000000000..1261dcab7812 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/balance.ts @@ -0,0 +1,8 @@ +import { walletClient } from './lib.ts' + +const recipient = '0x8D97689C9818892B700e27F316cc3E41e17fBeb9' +try { + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) +} catch (err) { + console.error(err) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts index c6b7700d1ccf..b25b5a7f2199 100644 --- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts +++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts @@ -1,11 +1,23 @@ import { compile } from '@parity/revive' +import { format } from 'prettier' +import { parseArgs } from 'node:util' import solc from 'solc' import { readFileSync, writeFileSync } from 'fs' import { join } from 'path' type CompileInput = Parameters[0] -type CompileOutput = Awaited> -type Abi = CompileOutput['contracts'][string][string]['abi'] + +const { + values: { filter }, +} = parseArgs({ + args: process.argv.slice(2), + options: { + filter: { + type: 'string', + short: 'f', + }, + }, +}) function evmCompile(sources: CompileInput) { const input = { @@ -27,9 +39,9 @@ console.log('Compiling contracts...') const input = [ { file: 'Event.sol', contract: 'EventExample', keypath: 'event' }, - { file: 'Revert.sol', contract: 'RevertExample', keypath: 'revert' }, { file: 'PiggyBank.sol', contract: 'PiggyBank', keypath: 'piggyBank' }, -] + { file: 'ErrorTester.sol', contract: 'ErrorTester', keypath: 'errorTester' }, +].filter(({ keypath }) => !filter || keypath.includes(filter)) for (const { keypath, contract, file } of input) { const input = { @@ -41,7 +53,12 @@ for (const { keypath, contract, file } of input) { const out = JSON.parse(evmCompile(input)) const entry = out.contracts[file][contract] writeFileSync(join('evm', `${keypath}.bin`), Buffer.from(entry.evm.bytecode.object, 'hex')) - writeFileSync(join('abi', `${keypath}.json`), JSON.stringify(entry.abi, null, 2)) + writeFileSync( + join('abi', `${keypath}.ts`), + await 
format(`export const abi = ${JSON.stringify(entry.abi, null, 2)} as const`, { + parser: 'typescript', + }) + ) } { diff --git a/substrate/frame/revive/rpc/examples/js/src/event.ts b/substrate/frame/revive/rpc/examples/js/src/event.ts index 94cc2560272e..2e672a9772ff 100644 --- a/substrate/frame/revive/rpc/examples/js/src/event.ts +++ b/substrate/frame/revive/rpc/examples/js/src/event.ts @@ -1,15 +1,29 @@ //! Run with bun run script-event.ts -import { call, getContract, deploy } from './lib.ts' - -try { - const { abi, bytecode } = getContract('event') - const contract = await deploy(bytecode, abi) - const receipt = await call('triggerEvent', await contract.getAddress(), abi) - if (receipt) { - for (const log of receipt.logs) { - console.log('Event log:', JSON.stringify(log, null, 2)) - } - } -} catch (err) { - console.error(err) + +import { abi } from '../abi/event.ts' +import { assert, getByteCode, walletClient } from './lib.ts' + +const deployHash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('event'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash: deployHash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') + +const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'triggerEvent', +}) + +const hash = await walletClient.writeContract(request) +const receipt = await walletClient.waitForTransactionReceipt({ hash }) +console.log(`Receipt: ${receipt.status}`) +console.log(`Logs receipt: ${receipt.status}`) + +for (const log of receipt.logs) { + console.log('Event log:', log) } diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts new file mode 100644 index 000000000000..92b20473d165 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts @@ -0,0 +1,162 @@ +import { spawn, spawnSync, Subprocess } from 'bun' +import { join, resolve } from 'path' +import { readFileSync } from 'fs' +import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' + +export function getByteCode(name: string, evm: boolean): Hex { + const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) + return `0x${Buffer.from(bytecode).toString('hex')}` +} + +export type JsonRpcError = { + code: number + message: string + data: Hex +} + +export function killProcessOnPort(port: number) { + // Check which process is using the specified port + const result = spawnSync(['lsof', '-ti', `:${port}`]) + const output = result.stdout.toString().trim() + + if (output) { + console.log(`Port ${port} is in use. Killing process...`) + const pids = output.split('\n') + + // Kill each process using the port + for (const pid of pids) { + spawnSync(['kill', '-9', pid]) + console.log(`Killed process with PID: ${pid}`) + } + } +} + +export let jsonRpcErrors: JsonRpcError[] = [] +export async function createEnv(name: 'geth' | 'kitchensink') { + const gethPort = process.env.GETH_PORT || '8546' + const kitchensinkPort = process.env.KITCHENSINK_PORT || '8545' + const url = `http://localhost:${name == 'geth' ? gethPort : kitchensinkPort}` + const chain = defineChain({ + id: name == 'geth' ? 
1337 : 420420420, + name, + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [url], + }, + }, + testnet: true, + }) + + const transport = http(url, { + onFetchResponse: async (response) => { + const raw = await response.clone().json() + if (raw.error) { + jsonRpcErrors.push(raw.error as JsonRpcError) + } + }, + }) + + const wallet = createWalletClient({ + transport, + chain, + }) + + const [account] = await wallet.getAddresses() + const serverWallet = createWalletClient({ + account, + transport, + chain, + }).extend(publicActions) + + const accountWallet = createWalletClient({ + account: privateKeyToAccount( + '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' + ), + transport, + chain, + }).extend(publicActions) + + return { serverWallet, accountWallet, evm: name == 'geth' } +} + +// wait for http request to return 200 +export function waitForHealth(url: string) { + return new Promise<void>((resolve, reject) => { + const start = Date.now() + const interval = setInterval(() => { + fetch(url) + .then((res) => { + if (res.status === 200) { + clearInterval(interval) + resolve() + } + }) + .catch(() => { + const elapsed = Date.now() - start + if (elapsed > 30_000) { + clearInterval(interval) + reject(new Error('hit timeout')) + } + }) + }, 1000) + }) +} + +export const procs: Subprocess[] = [] +const polkadotSdkPath = resolve(__dirname, '../../../../../../..') +if (!process.env.USE_LIVE_SERVERS) { + procs.push( + // Run geth on port 8546 + // + (() => { + killProcessOnPort(8546) + return spawn( + 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( + ' ' + ), + { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } + ) + })(), + // Run the substrate node + (() => { + killProcessOnPort(9944) + return spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/kitchensink.out.log'), + stderr: Bun.file('/tmp/kitchensink.err.log'), + cwd: polkadotSdkPath, + } + ) + })(), + // Run eth-rpc on 8545 + await (async () => { + killProcessOnPort(8545) + const proc = spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } + ) + await waitForHealth('http://localhost:8545/health').catch() + return proc + })() + ) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts new file mode 100644 index 000000000000..468e7860bb9a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts @@ -0,0 +1,245 @@ +import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts' +import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test' +import { encodeFunctionData, Hex, parseEther } from 'viem' +import { abi } from '../abi/errorTester' +afterEach(() => { + jsonRpcErrors.length = 0 +}) +afterAll(async () => { + procs.forEach((proc) => proc.kill()) +}) +const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')]) +for (const env of envs) { + describe(env.serverWallet.chain.name, () => { + let errorTesterAddr: Hex = '0x' + beforeAll(async () => { + const hash = await env.serverWallet.deployContract({ + abi, + bytecode:
getByteCode('errorTester', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) throw new Error('Contract address should be set') + errorTesterAddr = deployReceipt.contractAddress + }) + + test('triggerAssertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerAssertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000001' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted: assert(false)') + } + }) + + test('triggerRevertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerRevertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe('execution reverted: This is a revert error') + expect(lastJsonRpcError?.data).toBe( + '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120726576657274206572726f7200000000000000000000' + ) + } + }) + + test('triggerDivisionByZero', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerDivisionByZero', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000012' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: division or modulo by zero' + ) + } + }) + + test('triggerOutOfBoundsError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerOutOfBoundsError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000032' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: out-of-bounds access of an array or bytesN' + ) + } + }) + + test('triggerCustomError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi, + functionName: 'triggerCustomError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x8d6ea8be0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120637573746f6d206572726f7200000000000000000000' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted') + } + }) + + test('eth_call (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.simulateContract({ + address: errorTesterAddr, + abi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + 
expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_call transfer (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.sendTransaction({ + to: '0x75E480dB528101a381Ce68544611C169Ad7EB342', + value: parseEther('10'), + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (revert)', async () => { + expect.assertions(3) + try { + await env.serverWallet.estimateContractGas({ + address: errorTesterAddr, + abi, + functionName: 'valueMatch', + value: parseEther('11'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: msg.value does not match value' + ) + expect(lastJsonRpcError?.data).toBe( + '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e6d73672e76616c756520646f6573206e6f74206d617463682076616c75650000' + ) + } + }) + + test('eth_get_balance (no account)', async () => { + const balance = await env.serverWallet.getBalance({ + address: '0x0000000000000000000000000000000000000123', + }) + expect(balance).toBe(0n) + }) + + test('eth_estimate (not enough funds to cover gas specified)', async () => { + expect.assertions(4) + try { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi, + functionName: 'setState', + args: [true], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (no gas specified)', async () => { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + const data = encodeFunctionData({ + abi, + functionName: 'setState', + args: [true], + }) + + await env.accountWallet.request({ + method: 'eth_estimateGas', + params: [ + { + data, + from: env.accountWallet.account.address, + to: errorTesterAddr, + }, + ], + }) + }) + }) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts index 975d8faf15b3..e1f0e780d95b 100644 --- a/substrate/frame/revive/rpc/examples/js/src/lib.ts +++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts @@ -1,22 +1,11 @@ -import { - Contract, - ContractFactory, - JsonRpcProvider, - TransactionReceipt, - TransactionResponse, - Wallet, -} from 'ethers' import { readFileSync } from 'node:fs' -import type { compile } from '@parity/revive' import { spawn } from 
'node:child_process' import { parseArgs } from 'node:util' -import { BaseContract } from 'ethers' - -type CompileOutput = Awaited<ReturnType<typeof compile>> -type Abi = CompileOutput['contracts'][string][string]['abi'] +import { createWalletClient, defineChain, Hex, http, parseEther, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' const { - values: { geth, westend, ['private-key']: privateKey }, + values: { geth, proxy, westend, endowment, ['private-key']: privateKey }, } = parseArgs({ args: process.argv.slice(2), options: { @@ -24,6 +13,13 @@ const { type: 'string', short: 'k', }, + endowment: { + type: 'string', + short: 'e', + }, + proxy: { + type: 'boolean', + }, geth: { type: 'boolean', }, @@ -42,7 +38,7 @@ if (geth) { '--http.api', 'web3,eth,debug,personal,net', '--http.port', - '8546', + process.env.GETH_PORT ?? '8546', '--dev', '--verbosity', '0', @@ -55,56 +51,78 @@ await new Promise((resolve) => setTimeout(resolve, 500)) } -export const provider = new JsonRpcProvider( - westend +const rpcUrl = proxy + ? 'http://localhost:8080' + : westend ? 'https://westend-asset-hub-eth-rpc.polkadot.io' : geth ? 'http://localhost:8546' : 'http://localhost:8545' -) -export const signer = privateKey ? new Wallet(privateKey, provider) : await provider.getSigner() -console.log(`Signer address: ${await signer.getAddress()}, Nonce: ${await signer.getNonce()}`) +export const chain = defineChain({ + id: geth ? 1337 : 420420420, + name: 'Asset Hub Westend', + network: 'asset-hub', + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [rpcUrl], + }, + }, + testnet: true, +}) + +const wallet = createWalletClient({ + transport: http(), + chain, +}) +const [account] = await wallet.getAddresses() +export const serverWalletClient = createWalletClient({ + account, + transport: http(), + chain, +}) + +export const walletClient = await (async () => { + if (privateKey) { + const account = privateKeyToAccount(`0x${privateKey}`) + console.log(`Wallet address ${account.address}`) + + const wallet = createWalletClient({ + account, + transport: http(), + chain, + }) + + if (endowment) { + await serverWalletClient.sendTransaction({ + to: account.address, + value: parseEther(endowment), + }) + console.log(`Endowed address ${account.address} with: ${endowment}`) + } + + return wallet.extend(publicActions) + } else { + return serverWalletClient.extend(publicActions) + } +})() /** * Get one of the pre-built contracts * @param name - the contract name */ -export function getContract(name: string): { abi: Abi; bytecode: string } { +export function getByteCode(name: string): Hex { const bytecode = geth ?
readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) - const abi = JSON.parse(readFileSync(`abi/${name}.json`, 'utf8')) as Abi - return { abi, bytecode: Buffer.from(bytecode).toString('hex') } + return `0x${Buffer.from(bytecode).toString('hex')}` } -/** - * Deploy a contract - * @returns the contract address - **/ -export async function deploy(bytecode: string, abi: Abi, args: any[] = []): Promise<BaseContract> { - console.log('Deploying contract with', args) - const contractFactory = new ContractFactory(abi, bytecode, signer) - - const contract = await contractFactory.deploy(args) - await contract.waitForDeployment() - const address = await contract.getAddress() - console.log(`Contract deployed: ${address}`) - - return contract -} - -/** - * Call a contract - **/ -export async function call( - method: string, - address: string, - abi: Abi, - args: any[] = [], - opts: { value?: bigint } = {} -): Promise<null | TransactionReceipt> { - console.log(`Calling ${method} at ${address} with`, args, opts) - const contract = new Contract(address, abi, signer) - const tx = (await contract[method](...args, opts)) as TransactionResponse - console.log('Call transaction hash:', tx.hash) - return tx.wait() +export function assert(condition: any, message: string): asserts condition { + if (!condition) { + throw new Error(message) + } } diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts index 7a8edbde3662..0040b0c78dc4 100644 --- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts +++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts @@ -1,24 +1,69 @@ -import { provider, call, getContract, deploy } from './lib.ts' -import { parseEther } from 'ethers' -import { PiggyBank } from '../types/ethers-contracts/PiggyBank' +import { assert, getByteCode, walletClient } from './lib.ts' +import { abi } from '../abi/piggyBank.ts' +import { parseEther } from 'viem' -try { - const { abi, bytecode } = getContract('piggyBank') - const contract = (await deploy(bytecode, abi)) as PiggyBank - const address = await contract.getAddress() +const hash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('piggyBank'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') - let receipt = await call('deposit', address, abi, [], { - value: parseEther('10.0'), +// Deposit 10 WST +{ + const result = await walletClient.estimateContractGas({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), }) - console.log('Deposit receipt:', receipt?.status) - console.log(`Contract balance: ${await provider.getBalance(address)}`) - console.log('deposit: ', await contract.getDeposit()) + console.log(`Gas estimate: ${result}`) - receipt = await call('withdraw', address, abi, [parseEther('5.0')]) - console.log('Withdraw receipt:', receipt?.status) - console.log(`Contract balance: ${await provider.getBalance(address)}`) - console.log('deposit: ', await contract.getDeposit()) -} catch (err) { - console.error(err) + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), + }) + + request.nonce = 0 + const hash = await walletClient.writeContract(request) + + const receipt = await
walletClient.waitForTransactionReceipt({ hash }) + console.log(`Deposit receipt: ${receipt.status}`) + if (process.env.STOP) { + process.exit(0) + } +} + +// Withdraw 5 WST +{ + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'withdraw', + args: [parseEther('5')], + }) + + const hash = await walletClient.writeContract(request) + const receipt = await walletClient.waitForTransactionReceipt({ hash }) + console.log(`Withdraw receipt: ${receipt.status}`) + + // Check remaining balance + const balance = await walletClient.readContract({ + address: contractAddress, + abi, + functionName: 'getDeposit', + }) + + console.log(`Get deposit: ${balance}`) + console.log( + `Get contract balance: ${await walletClient.getBalance({ address: contractAddress })}` + ) } diff --git a/substrate/frame/revive/rpc/examples/js/src/revert.ts b/substrate/frame/revive/rpc/examples/js/src/revert.ts deleted file mode 100644 index ea1bf4eceeb9..000000000000 --- a/substrate/frame/revive/rpc/examples/js/src/revert.ts +++ /dev/null @@ -1,10 +0,0 @@ -//! Run with bun run script-revert.ts -import { call, getContract, deploy } from './lib.ts' - -try { - const { abi, bytecode } = getContract('revert') - const contract = await deploy(bytecode, abi) - await call('doRevert', await contract.getAddress(), abi) -} catch (err) { - console.error(err) -} diff --git a/substrate/frame/revive/rpc/examples/js/src/transfer.ts b/substrate/frame/revive/rpc/examples/js/src/transfer.ts index ae2dd50f2af8..aef9a487b0c0 100644 --- a/substrate/frame/revive/rpc/examples/js/src/transfer.ts +++ b/substrate/frame/revive/rpc/examples/js/src/transfer.ts @@ -1,17 +1,18 @@ -import { parseEther } from 'ethers' -import { provider, signer } from './lib.ts' +import { parseEther } from 'viem' +import { walletClient } from './lib.ts' const recipient = '0x75E480dB528101a381Ce68544611C169Ad7EB342' try { - console.log(`Signer balance: ${await provider.getBalance(signer.address)}`) - console.log(`Recipient balance: ${await provider.getBalance(recipient)}`) - await signer.sendTransaction({ + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) + + await walletClient.sendTransaction({ to: recipient, value: parseEther('1.0'), }) console.log(`Sent: ${parseEther('1.0')}`) - console.log(`Signer balance: ${await provider.getBalance(signer.address)}`) - console.log(`Recipient balance: ${await provider.getBalance(recipient)}`) + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) } catch (err) { console.error(err) } diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts deleted file mode 100644 index d65f953969f0..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Event.ts +++ /dev/null @@ -1,117 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BigNumberish, - BytesLike, - FunctionFragment, - Result, - Interface, - EventFragment, - AddressLike, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedLogDescription, - TypedListener, - TypedContractMethod, -} from './common' - -export interface EventInterface extends Interface { - getFunction(nameOrSignature: 'triggerEvent'): FunctionFragment - - getEvent(nameOrSignatureOrTopic: 'ExampleEvent'): EventFragment - - encodeFunctionData(functionFragment: 'triggerEvent', values?: undefined): string - - decodeFunctionResult(functionFragment: 'triggerEvent', data: BytesLike): Result -} - -export namespace ExampleEventEvent { - export type InputTuple = [sender: AddressLike, value: BigNumberish, message: string] - export type OutputTuple = [sender: string, value: bigint, message: string] - export interface OutputObject { - sender: string - value: bigint - message: string - } - export type Event = TypedContractEvent - export type Filter = TypedDeferredTopicFilter - export type Log = TypedEventLog - export type LogDescription = TypedLogDescription -} - -export interface Event extends BaseContract { - connect(runner?: ContractRunner | null): Event - waitForDeployment(): Promise - - interface: EventInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - triggerEvent: TypedContractMethod<[], [void], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'triggerEvent'): TypedContractMethod<[], [void], 'nonpayable'> - - getEvent( - key: 'ExampleEvent' - ): TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - - filters: { - 'ExampleEvent(address,uint256,string)': TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - ExampleEvent: TypedContractEvent< - ExampleEventEvent.InputTuple, - ExampleEventEvent.OutputTuple, - ExampleEventEvent.OutputObject - > - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts deleted file mode 100644 index ca137fcc8b30..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/PiggyBank.ts +++ /dev/null @@ -1,96 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BigNumberish, - BytesLike, - FunctionFragment, - Result, - Interface, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedListener, - TypedContractMethod, -} from './common' - -export interface PiggyBankInterface extends Interface { - getFunction(nameOrSignature: 'deposit' | 'getDeposit' | 'owner' | 'withdraw'): FunctionFragment - - encodeFunctionData(functionFragment: 'deposit', values?: undefined): string - encodeFunctionData(functionFragment: 'getDeposit', values?: undefined): string - encodeFunctionData(functionFragment: 'owner', values?: undefined): string - encodeFunctionData(functionFragment: 'withdraw', values: [BigNumberish]): string - - decodeFunctionResult(functionFragment: 'deposit', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'getDeposit', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'owner', data: BytesLike): Result - decodeFunctionResult(functionFragment: 'withdraw', data: BytesLike): Result -} - -export interface PiggyBank extends BaseContract { - connect(runner?: ContractRunner | null): PiggyBank - waitForDeployment(): Promise - - interface: PiggyBankInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - deposit: TypedContractMethod<[], [bigint], 'payable'> - - getDeposit: TypedContractMethod<[], [bigint], 'view'> - - owner: TypedContractMethod<[], [string], 'view'> - - withdraw: TypedContractMethod<[withdrawAmount: BigNumberish], [bigint], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'deposit'): TypedContractMethod<[], [bigint], 'payable'> - getFunction(nameOrSignature: 'getDeposit'): TypedContractMethod<[], [bigint], 'view'> - getFunction(nameOrSignature: 'owner'): TypedContractMethod<[], [string], 'view'> - getFunction( - nameOrSignature: 'withdraw' - ): TypedContractMethod<[withdrawAmount: BigNumberish], [bigint], 'nonpayable'> - - filters: {} -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts deleted file mode 100644 index ad6e23b38a65..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/Revert.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/ -/* tslint:disable */ -/* eslint-disable */ -import type { - BaseContract, - BytesLike, - FunctionFragment, - Result, - Interface, - ContractRunner, - ContractMethod, - Listener, -} from 'ethers' -import type { - TypedContractEvent, - TypedDeferredTopicFilter, - TypedEventLog, - TypedListener, - TypedContractMethod, -} from './common' - -export interface RevertInterface extends Interface { - getFunction(nameOrSignature: 'doRevert'): FunctionFragment - - encodeFunctionData(functionFragment: 'doRevert', values?: undefined): string - - decodeFunctionResult(functionFragment: 'doRevert', data: BytesLike): Result -} - -export interface Revert extends BaseContract { - connect(runner?: ContractRunner | null): Revert - waitForDeployment(): Promise - - interface: RevertInterface - - queryFilter( - event: TCEvent, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - queryFilter( - filter: TypedDeferredTopicFilter, - fromBlockOrBlockhash?: string | number | undefined, - toBlock?: string | number | undefined - ): Promise>> - - on( - event: TCEvent, - listener: TypedListener - ): Promise - on( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - once( - event: TCEvent, - listener: TypedListener - ): Promise - once( - filter: TypedDeferredTopicFilter, - listener: TypedListener - ): Promise - - listeners( - event: TCEvent - ): Promise>> - listeners(eventName?: string): Promise> - removeAllListeners(event?: TCEvent): Promise - - doRevert: TypedContractMethod<[], [void], 'nonpayable'> - - getFunction(key: string | FunctionFragment): T - - getFunction(nameOrSignature: 'doRevert'): TypedContractMethod<[], [void], 'nonpayable'> - - filters: {} -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts deleted file mode 100644 index 247b9468ece2..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/common.ts +++ /dev/null @@ -1,100 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ -import type { - FunctionFragment, - Typed, - EventFragment, - ContractTransaction, - ContractTransactionResponse, - DeferredTopicFilter, - EventLog, - TransactionRequest, - LogDescription, -} from 'ethers' - -export interface TypedDeferredTopicFilter<_TCEvent extends TypedContractEvent> - extends DeferredTopicFilter {} - -export interface TypedContractEvent< - InputTuple extends Array = any, - OutputTuple extends Array = any, - OutputObject = any, -> { - ( - ...args: Partial - ): TypedDeferredTopicFilter> - name: string - fragment: EventFragment - getFragment(...args: Partial): EventFragment -} - -type __TypechainAOutputTuple = T extends TypedContractEvent ? W : never -type __TypechainOutputObject = - T extends TypedContractEvent ? V : never - -export interface TypedEventLog extends Omit { - args: __TypechainAOutputTuple & __TypechainOutputObject -} - -export interface TypedLogDescription - extends Omit { - args: __TypechainAOutputTuple & __TypechainOutputObject -} - -export type TypedListener = ( - ...listenerArg: [...__TypechainAOutputTuple, TypedEventLog, ...undefined[]] -) => void - -export type MinEthersFactory = { - deploy(...a: ARGS[]): Promise -} - -export type GetContractTypeFromFactory = F extends MinEthersFactory ? C : never -export type GetARGsTypeFromFactory = - F extends MinEthersFactory ? 
Parameters : never - -export type StateMutability = 'nonpayable' | 'payable' | 'view' - -export type BaseOverrides = Omit -export type NonPayableOverrides = Omit -export type PayableOverrides = Omit -export type ViewOverrides = Omit -export type Overrides = S extends 'nonpayable' - ? NonPayableOverrides - : S extends 'payable' - ? PayableOverrides - : ViewOverrides - -export type PostfixOverrides, S extends StateMutability> = - | A - | [...A, Overrides] -export type ContractMethodArgs, S extends StateMutability> = PostfixOverrides< - { [I in keyof A]-?: A[I] | Typed }, - S -> - -export type DefaultReturnType = R extends Array ? R[0] : R - -// export interface ContractMethod = Array, R = any, D extends R | ContractTransactionResponse = R | ContractTransactionResponse> { -export interface TypedContractMethod< - A extends Array = Array, - R = any, - S extends StateMutability = 'payable', -> { - ( - ...args: ContractMethodArgs - ): S extends 'view' ? Promise> : Promise - - name: string - - fragment: FunctionFragment - - getFragment(...args: ContractMethodArgs): FunctionFragment - - populateTransaction(...args: ContractMethodArgs): Promise - staticCall(...args: ContractMethodArgs): Promise> - send(...args: ContractMethodArgs): Promise - estimateGas(...args: ContractMethodArgs): Promise - staticCallResult(...args: ContractMethodArgs): Promise -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts deleted file mode 100644 index 2e16b18a7ed8..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Event__factory.ts +++ /dev/null @@ -1,51 +0,0 @@ -/* Autogenerated file. Do not edit manually. */ -/* tslint:disable */ -/* eslint-disable */ - -import { Contract, Interface, type ContractRunner } from 'ethers' -import type { Event, EventInterface } from '../Event' - -const _abi = [ - { - anonymous: false, - inputs: [ - { - indexed: true, - internalType: 'address', - name: 'sender', - type: 'address', - }, - { - indexed: false, - internalType: 'uint256', - name: 'value', - type: 'uint256', - }, - { - indexed: false, - internalType: 'string', - name: 'message', - type: 'string', - }, - ], - name: 'ExampleEvent', - type: 'event', - }, - { - inputs: [], - name: 'triggerEvent', - outputs: [], - stateMutability: 'nonpayable', - type: 'function', - }, -] as const - -export class Event__factory { - static readonly abi = _abi - static createInterface(): EventInterface { - return new Interface(_abi) as EventInterface - } - static connect(address: string, runner?: ContractRunner | null): Event { - return new Contract(address, _abi, runner) as unknown as Event - } -} diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts deleted file mode 100644 index ece1c6b5426e..000000000000 --- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/Revert__factory.ts +++ /dev/null @@ -1,31 +0,0 @@ -/* Autogenerated file. Do not edit manually. 
*/
-/* tslint:disable */
-/* eslint-disable */
-
-import { Contract, Interface, type ContractRunner } from 'ethers'
-import type { Revert, RevertInterface } from '../Revert'
-
-const _abi = [
-	{
-		inputs: [],
-		stateMutability: 'nonpayable',
-		type: 'constructor',
-	},
-	{
-		inputs: [],
-		name: 'doRevert',
-		outputs: [],
-		stateMutability: 'nonpayable',
-		type: 'function',
-	},
-] as const
-
-export class Revert__factory {
-	static readonly abi = _abi
-	static createInterface(): RevertInterface {
-		return new Interface(_abi) as RevertInterface
-	}
-	static connect(address: string, runner?: ContractRunner | null): Revert {
-		return new Contract(address, _abi, runner) as unknown as Revert
-	}
-}
diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts
deleted file mode 100644
index 67370dba411c..000000000000
--- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/factories/index.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-/* Autogenerated file. Do not edit manually. */
-/* tslint:disable */
-/* eslint-disable */
-export { Event__factory } from './Event__factory'
-export { PiggyBank__factory } from './PiggyBank__factory'
-export { Revert__factory } from './Revert__factory'
diff --git a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts b/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts
deleted file mode 100644
index 3e324e80dcb1..000000000000
--- a/substrate/frame/revive/rpc/examples/js/types/ethers-contracts/index.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Autogenerated file. Do not edit manually. */
-/* tslint:disable */
-/* eslint-disable */
-export type { Event } from './Event'
-export type { PiggyBank } from './PiggyBank'
-export type { Revert } from './Revert'
-export * as factories from './factories'
-export { Event__factory } from './factories/Event__factory'
-export { PiggyBank__factory } from './factories/PiggyBank__factory'
-export { Revert__factory } from './factories/Revert__factory'
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index 3560b3b90407acce7f602ce91ac089843be8dea8..64b1f2014dd06815fcea6a87bc96306eb00eda8b 100644
GIT binary patch
delta 13838
[base85-encoded binary delta omitted]
delta 11763
[base85-encoded binary delta omitted]
zk3j#Hx*a`(TJ*vEBF@zrgoECR`?s^5VvTPrwZ5&?b=gX-cix|Oum$=$Wh=F2;iws| zo2X3=dzlT~VCSo`D;l!5VeW*~1vYb9-${oY{n{Gi_s+ApM8+2Vi}bE~<{+-~kL z>nz-2vIJU$7Bi3&$P{xeixOLXjm5QWh}h<9?5<^l#CBh!sh0H8TN>C5jM2_Eu*ikcmaaxK%+kYX zh9MXlO$^SP>nbeD@<^CzdRm)A;&84sd!<%*8j%agiUO?==ban}=Rafp;IYrt2 zha}*>7v!wW!E46+0%uO4Q@y7snB!%ftR+fh_ZeoHr7tjGNP-4|?dbh4$p z|1Qy@1CPaRrQmYvKQYxJ;1B(Gi8dA^s?%Svx&BEO5|x~@!d2kP@gzDuD5=F>x|W<| zW1`Z!ZOCcqx+AB8xBHfyUeL=Y+1uzO`h1BIX3Z%)m)uP`_oaE5&nS=tS%tS3~N z7pe?GzG4&5b3A^Q^@R^EV6agEXD_ga&=I_Kp2b@#EJQn3AU4-3WK-c@Xli6(!z;Qm zXS>3j6+UTi@3O@e>JGOnxMMNXBcEjsEY6%|;}N^M;MYwoIcje=TB~}}TIHoxDa1L# zelO^UUn838E_5I4M)$%0M)$#Pbk}sHyQT--HD25^&f$$^I~1Q|!-m&(gRkofU)KY^ z&I_N|j4oUHh9w3Z?FQXUk!!~x{Tmh_9rtb!bHj=hc6`I;VQ%>PH!P%2gN4{1EE+R< zynS?^#f*kVdZv z8ewA-i-msYSr{}nvHOQ#>PFa^uG>1(V_VH$ggF;jJREL96e0vQ$0sqmTkAd_{-=oI#gt%5|cbDrsn2&3eZvb{7$GZ8BJ{3 zx#W@s^Qy*WE|-mT_Jipn8w`2hGh;mFy%bkIcaHTLc$L0$ zg^i31Mr`-8^WdJWI{B8Y8a;lpFP?Qyi?2XPhEvn$Unoo>*(OdQ;}&H`X} z3wv#Sd^fpJkE|_wUwGy0lWVHPtF4V^vJC3Mp18u`q8*c6Py% zyaG)Bz}@Ri2OnKWQ%F^)8biWh&<`wO_=0XrZS13RX zhgHH0KVmF%iI-2t^zfMa4ykuplD6JV-c&-% z5IA@jslOzDc9%WJ?^~K;%MuKNeMVIS@xSZ)n=<{GV1XE-ZtsJ2T(avs+mHVO`?xer zgKhasE*%`DJ$<3_IeAOG)KBBBzB5_!>#wUPS|L>aI77-KNLpEeG|Pf^V$UmZt#aoU z6=oH$fohl3SI$``jn|Vh`Sm<0h>)%Fzw@O}2p+Fn3#B=zMHdRC$MNKHdZdMOG44sOfU1YekNM|K< z06tV=a$3n^3(XvMZIIT9EgVAsNBRjr3y|xdlC}}W2wWSaF=B^z0=}7S&EY@$#vKI4 zY(y$_@a9ITKfaB8x>0(VaCtrQXSO>>hYl@;RPuGCchxr(W^UO zK*%uo@&zdj)wk^hDGqPXBg&<vu~{Xo1c>(h$59m+g`67gO~xw1!WR{VSzlEmh;GI5{u9x99Yn$(Z{MGk*WYG(KfZ+Tn#4w--ZZD|==a$2=C6+bVj ztd>@bWUn0jH{2pB_3(G3kxDk}9cdI674JwhQ6v}Mkv#ah$Rb(Vgx`r=kfkaoRE-?NdVpMb zQ97wb7m@tEG*ENWa7HfpUMd&SbJ45PN^_I4{u=ufu56!6m06nv5@%6fB>~E5Upr!HvqqbtRQghmNZLa zGWtP$o7AoeG|oX)4v(K^lG^N@===XQQ4U z)bI&9)bE=T_n>|o-$wL9p+e6S{?pJI_&X@ie;W7{RPwt9K2FICFAwLLDAJiDxE&23 zdjzjQ$*vf~i{Y^-{%2jj5%~%`$MP6>D4GYNJAW{i-v?Wxc^|xUZH?svnNbwQb;c6- zS1ccmGB=OpuWB5n#^Hi>)-q2aQU6bfxc8o>a<L{n?5^DP3mVsZP&VMH81MH*x}jt|n2lQ4V|k4A+^pTsAludAHI4a!Y; z|789L^e;4l$6_%lfv@U&$rvTr=HcfU1;xZyO1927;b!c9%pb$| z-3`ecPbG)^!y=CEBofXn;SZ2zIXs;&(cni+8#3@qvls}=*%JrnoT1c{JufJ!l_c@V6KF)5IyqRq!|+nnK|YzQ>{XCT)~`bc%b59p870 zT*HUXbQUakEmZ0w@wrtV{W1!fWXo+kc?iXHrr)c4unxsD;WfS#EoAp=d<90FQWa`> z8N^oc*{JIqtB@@f=Izi@#W&$WzUFm43lH)SUgwP{y1j4k2T^q7O}-MPll>+?gwh$f zpMQspH2sB}2_7DyZ}BJb@Ywwp59N6K{pTSbuB7`m??E?y5{A_9xp<^JUW3}$C?Bao zW{XW`*!5>=ll;Ozcp1fOul?WLvA)?n;QpyATp26zL#IEw3UcS+hcd2$7W2l*YVW)} zSH?nDffC5Knys;F-~2Vco;I^JO6{5M%5fFC3){`3y)#nq#mt%I_7r!RM@MvG&v5B1 z8;7c`MK1U96&`NcI0p{@n+NnZSvDrB11b3ZY$oQG?#HiB1L4ZQaT8$>{~nrcB&>Rm zAHbk`@DZMfPocR-_L6T(wxIW-50gEk?XN|)W zu{0|;W91Tzu~v*ulb0RiQ$>r*GN2GY0&!(yrsgPf%ZEPXuZx-@OR;>TiHC^r#0m5P z>*U|=@Et^+)4;#eirSB%`U@Vo?G*3LeBaB;$4>EAw5Vsx&vK23f?nCk*K0Rev_~N7 z9PbZ)=g<;1z|eDi3v}G!(`C&!{EDb4^DdJsl7n*kxBNMR2SUpw{x<5yE8p=d%YN@Z zVyzgBd19+!^;JFt_FU!(7)4yT%%37hTJTLM%~uD1lZbMlHjvC#K2A0T#E;M7(AC_0JMYiI{tK6Q;hP9%rL znB~f``oTljd2nYdP+oVPZxoSe+R87XU0-WO$Uu4dO};=xVb$K^|D&>US{r{vv_)Gd ztaKGGQ9g=LuA8~J%aF|$dpsn!@uBjqpU@{zwI@yO0r7}t07PBjlHBqO#}H1Hg1<() z&LSlN>EHnsk^0Rfrs9x(42iG!LH`IE_8*DB!v1}z3PU+eXixbWi!eJdd)a-K<#UV4N4 MsTJ4Mlv;)V0}5B>2><{9 diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index d37f1d760065..901c15e9756b 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -32,7 +32,7 @@ use pallet_revive::{ Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log, ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, }, - EthContractResult, + EthTransactError, EthTransactInfo, }; use 
 use sp_weights::Weight;
@@ -116,18 +116,42 @@ fn unwrap_call_err(err: &subxt::error::RpcError) -> Option<ErrorObjectOwned> {
 
 /// Extract the revert message from a revert("msg") solidity statement.
 fn extract_revert_message(exec_data: &[u8]) -> Option<String> {
-	let function_selector = exec_data.get(0..4)?;
-
-	// keccak256("Error(string)")
-	let expected_selector = [0x08, 0xC3, 0x79, 0xA0];
-	if function_selector != expected_selector {
-		return None;
-	}
+	let error_selector = exec_data.get(0..4)?;
+
+	match error_selector {
+		// assert(false)
+		[0x4E, 0x48, 0x7B, 0x71] => {
+			let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?;
+
+			// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
+			let msg = match panic_code {
+				0x00 => "generic panic",
+				0x01 => "assert(false)",
+				0x11 => "arithmetic underflow or overflow",
+				0x12 => "division or modulo by zero",
+				0x21 => "enum overflow",
+				0x22 => "invalid encoded storage byte array accessed",
+				0x31 => "out-of-bounds array access; popping on an empty array",
+				0x32 => "out-of-bounds access of an array or bytesN",
+				0x41 => "out of memory",
+				0x51 => "uninitialized function",
+				code => return Some(format!("execution reverted: unknown panic code: {code:#x}")),
+			};
 
-	let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?;
-	match decoded.first()? {
-		ethabi::Token::String(msg) => Some(msg.to_string()),
-		_ => None,
+			Some(format!("execution reverted: {msg}"))
+		},
+		// revert(string)
+		[0x08, 0xC3, 0x79, 0xA0] => {
+			let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?;
+			if let Some(ethabi::Token::String(msg)) = decoded.first() {
+				return Some(format!("execution reverted: {msg}"))
+			}
+			Some("execution reverted".to_string())
+		},
+		_ => {
+			log::debug!(target: LOG_TARGET, "Unknown revert function selector: {error_selector:?}");
+			Some("execution reverted".to_string())
+		},
+	}
 }
 
@@ -146,42 +170,46 @@ pub enum ClientError {
 	/// A [`codec::Error`] wrapper error.
 	#[error(transparent)]
 	CodecError(#[from] codec::Error),
-	/// The dry run failed.
-	#[error("Dry run failed: {0}")]
-	DryRunFailed(String),
 	/// Contract reverted
-	#[error("Execution reverted: {}", extract_revert_message(.0).unwrap_or_default())]
-	Reverted(Vec<u8>),
+	#[error("contract reverted")]
+	Reverted(EthTransactError),
 	/// A decimal conversion failed.
-	#[error("Conversion failed")]
+	#[error("conversion failed")]
 	ConversionFailed,
 	/// The block hash was not found.
-	#[error("Hash not found")]
+	#[error("hash not found")]
 	BlockNotFound,
 	/// The transaction fee could not be found
-	#[error("TransactionFeePaid event not found")]
+	#[error("transactionFeePaid event not found")]
 	TxFeeNotFound,
 	/// The cache is empty.
-	#[error("Cache is empty")]
+	#[error("cache is empty")]
 	CacheEmpty,
 }
 
-// TODO convert error code to https://eips.ethereum.org/EIPS/eip-1474#error-codes
+const REVERT_CODE: i32 = 3;
 impl From<ClientError> for ErrorObjectOwned {
 	fn from(err: ClientError) -> Self {
-		let msg = err.to_string();
 		match err {
 			ClientError::SubxtError(subxt::Error::Rpc(err)) | ClientError::RpcError(err) => {
 				if let Some(err) = unwrap_call_err(&err) {
 					return err;
 				}
-				ErrorObjectOwned::owned::<Vec<u8>>(CALL_EXECUTION_FAILED_CODE, msg, None)
+				ErrorObjectOwned::owned::<Vec<u8>>(
+					CALL_EXECUTION_FAILED_CODE,
+					err.to_string(),
+					None,
+				)
 			},
-			ClientError::Reverted(data) => {
+			ClientError::Reverted(EthTransactError::Data(data)) => {
+				let msg = extract_revert_message(&data).unwrap_or_default();
 				let data = format!("0x{}", hex::encode(data));
-				ErrorObjectOwned::owned::<String>(CALL_EXECUTION_FAILED_CODE, msg, Some(data))
+				ErrorObjectOwned::owned::<String>(REVERT_CODE, msg, Some(data))
 			},
-			_ => ErrorObjectOwned::owned::<String>(CALL_EXECUTION_FAILED_CODE, msg, None),
+			ClientError::Reverted(EthTransactError::Message(msg)) =>
+				ErrorObjectOwned::owned::<String>(CALL_EXECUTION_FAILED_CODE, msg, None),
+			_ =>
+				ErrorObjectOwned::owned::<String>(CALL_EXECUTION_FAILED_CODE, err.to_string(), None),
 		}
 	}
 }
@@ -634,54 +662,25 @@ impl Client {
 		Ok(result)
 	}
 
-	/// Dry run a transaction and returns the [`EthContractResult`] for the transaction.
+	/// Dry run a transaction and return the [`EthTransactInfo`] for the transaction.
 	pub async fn dry_run(
 		&self,
-		tx: &GenericTransaction,
+		tx: GenericTransaction,
 		block: BlockNumberOrTagOrHash,
-	) -> Result<EthContractResult<Balance, Vec<u8>>, ClientError> {
+	) -> Result<EthTransactInfo<Balance>, ClientError> {
 		let runtime_api = self.runtime_api(&block).await?;
+		let payload = subxt_client::apis().revive_api().eth_transact(tx.into());
 
-		// TODO: remove once subxt is updated
-		let value = subxt::utils::Static(tx.value.unwrap_or_default());
-		let from = tx.from.map(|v| v.0.into());
-		let to = tx.to.map(|v| v.0.into());
-
-		let payload = subxt_client::apis().revive_api().eth_transact(
-			from.unwrap_or_default(),
-			to,
-			value,
-			tx.input.clone().unwrap_or_default().0,
-			None,
-			None,
-		);
-
-		let EthContractResult { fee, gas_required, storage_deposit, result } =
-			runtime_api.call(payload).await?.0;
+		let result = runtime_api.call(payload).await?;
 		match result {
 			Err(err) => {
 				log::debug!(target: LOG_TARGET, "Dry run failed {err:?}");
-				Err(ClientError::DryRunFailed(format!("{err:?}")))
+				Err(ClientError::Reverted(err.0))
 			},
-			Ok(result) if result.did_revert() => {
-				log::debug!(target: LOG_TARGET, "Dry run reverted");
-				Err(ClientError::Reverted(result.0.data))
-			},
-			Ok(result) =>
-				Ok(EthContractResult { fee, gas_required, storage_deposit, result: result.0.data }),
+			Ok(result) => Ok(result.0),
 		}
 	}
 
-	/// Dry run a transaction and returns the gas estimate for the transaction.
-	pub async fn estimate_gas(
-		&self,
-		tx: &GenericTransaction,
-		block: BlockNumberOrTagOrHash,
-	) -> Result<U256, ClientError> {
-		let dry_run = self.dry_run(tx, block).await?;
-		Ok(U256::from(dry_run.fee / GAS_PRICE as u128) + GAS_PRICE)
-	}
-
 	/// Get the nonce of the given address.
 	pub async fn nonce(
 		&self,
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index 6a324e63a857..ccd8bb043e90 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -23,7 +23,7 @@ use jsonrpsee::{
 	core::{async_trait, RpcResult},
 	types::{ErrorCode, ErrorObjectOwned},
 };
-use pallet_revive::{evm::*, EthContractResult};
+use pallet_revive::evm::*;
 use sp_core::{keccak_256, H160, H256, U256};
 use thiserror::Error;
 
@@ -128,10 +128,22 @@ impl EthRpcServer for EthRpcServerImpl {
 	async fn estimate_gas(
 		&self,
 		transaction: GenericTransaction,
-		_block: Option<BlockNumberOrTagOrHash>,
+		block: Option<BlockNumberOrTagOrHash>,
 	) -> RpcResult<U256> {
-		let result = self.client.estimate_gas(&transaction, BlockTag::Latest.into()).await?;
-		Ok(result)
+		let dry_run = self.client.dry_run(transaction, block.unwrap_or_default().into()).await?;
+		Ok(dry_run.eth_gas)
+	}
+
+	async fn call(
+		&self,
+		transaction: GenericTransaction,
+		block: Option<BlockNumberOrTagOrHash>,
+	) -> RpcResult<Bytes> {
+		let dry_run = self
+			.client
+			.dry_run(transaction, block.unwrap_or_else(|| BlockTag::Latest.into()))
+			.await?;
+		Ok(dry_run.data.into())
 	}
 
 	async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult<H256> {
@@ -150,15 +162,17 @@ impl EthRpcServer for EthRpcServerImpl {
 		let tx = GenericTransaction::from_signed(tx, Some(eth_addr));
 
 		// Dry run the transaction to get the weight limit and storage deposit limit
-		let dry_run = self.client.dry_run(&tx, BlockTag::Latest.into()).await?;
+		let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?;
 
-		let EthContractResult { gas_required, storage_deposit, .. } = dry_run;
 		let call = subxt_client::tx().revive().eth_transact(
 			transaction.0,
-			gas_required.into(),
-			storage_deposit,
+			dry_run.gas_required.into(),
+			dry_run.storage_deposit,
 		);
-		self.client.submit(call).await?;
+		self.client.submit(call).await.map_err(|err| {
+			log::debug!(target: LOG_TARGET, "submit call failed: {err:?}");
+			err
+		})?;
 		log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}");
 		Ok(hash)
 	}
@@ -234,18 +248,6 @@ impl EthRpcServer for EthRpcServerImpl {
 		Ok(self.accounts.iter().map(|account| account.address()).collect())
 	}
 
-	async fn call(
-		&self,
-		transaction: GenericTransaction,
-		block: Option<BlockNumberOrTagOrHash>,
-	) -> RpcResult<Bytes> {
-		let dry_run = self
-			.client
-			.dry_run(&transaction, block.unwrap_or_else(|| BlockTag::Latest.into()))
-			.await?;
-		Ok(dry_run.result.into())
-	}
-
 	async fn get_block_by_number(
 		&self,
 		block: BlockNumberOrTag,
diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
index 339080368969..ad34dbfdfb49 100644
--- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
+++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
@@ -14,6 +14,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+
 //! Generated JSON-RPC methods.
 #![allow(missing_docs)]
 
diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs
index a232b231bc7c..1e1c395028a4 100644
--- a/substrate/frame/revive/rpc/src/subxt_client.rs
+++ b/substrate/frame/revive/rpc/src/subxt_client.rs
@@ -27,8 +27,16 @@ use subxt::config::{signed_extensions, Config, PolkadotConfig};
 		with = "::subxt::utils::Static<::sp_core::U256>"
 	),
 	substitute_type(
-		path = "pallet_revive::primitives::EthContractResult",
-		with = "::subxt::utils::Static<::pallet_revive::EthContractResult>"
+		path = "pallet_revive::evm::api::rpc_types_gen::GenericTransaction",
+		with = "::subxt::utils::Static<::pallet_revive::evm::GenericTransaction>"
+	),
+	substitute_type(
+		path = "pallet_revive::primitives::EthTransactInfo",
+		with = "::subxt::utils::Static<::pallet_revive::EthTransactInfo>"
+	),
+	substitute_type(
+		path = "pallet_revive::primitives::EthTransactError",
+		with = "::subxt::utils::Static<::pallet_revive::EthTransactError>"
 	),
 	substitute_type(
 		path = "pallet_revive::primitives::ExecReturnValue",
diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs
index 920318b26f71..7f2d4e683c31 100644
--- a/substrate/frame/revive/rpc/src/tests.rs
+++ b/substrate/frame/revive/rpc/src/tests.rs
@@ -238,7 +238,8 @@ async fn revert_call() -> anyhow::Result<()> {
 		.unwrap_err();
 	let call_err = unwrap_call_err!(err.source().unwrap());
-	assert_eq!(call_err.message(), "Execution reverted: revert message");
+	assert_eq!(call_err.message(), "execution reverted: revert message");
+	assert_eq!(call_err.code(), 3);
 	Ok(())
 }
 
diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs
index 9c4d817a07de..b73815bfb9ea 100644
--- a/substrate/frame/revive/src/benchmarking/mod.rs
+++ b/substrate/frame/revive/src/benchmarking/mod.rs
@@ -103,7 +103,7 @@ where
 			origin,
 			0u32.into(),
 			Weight::MAX,
-			default_deposit_limit::<T>(),
+			DepositLimit::Balance(default_deposit_limit::<T>()),
 			Code::Upload(module.code),
 			data,
 			salt,
diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs
index 3442ed73acca..9b61cd042ec5 100644
--- a/substrate/frame/revive/src/evm/api/rlp_codec.rs
+++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs
@@ -88,14 +88,14 @@ impl TransactionSigned {
 	}
 }
 
-impl TransactionLegacyUnsigned {
-	/// Get the rlp encoded bytes of a signed transaction with a dummy 65 bytes signature.
+impl TransactionUnsigned {
+	/// Get a signed transaction payload with a dummy 65-byte signature.
 	pub fn dummy_signed_payload(&self) -> Vec<u8> {
-		let mut s = rlp::RlpStream::new();
-		s.append(self);
 		const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65];
-		s.append_raw(&DUMMY_SIGNATURE.as_ref(), 1);
-		s.out().to_vec()
+		self.unsigned_payload()
+			.into_iter()
+			.chain(DUMMY_SIGNATURE.iter().copied())
+			.collect::<Vec<_>>()
 	}
 }
 
@@ -567,7 +567,7 @@ mod test {
 
 	#[test]
 	fn dummy_signed_payload_works() {
-		let tx = TransactionLegacyUnsigned {
+		let tx: TransactionUnsigned = TransactionLegacyUnsigned {
 			chain_id: Some(596.into()),
 			gas: U256::from(21000),
 			nonce: U256::from(1),
@@ -576,10 +576,10 @@ mod test {
 			value: U256::from(123123),
 			input: Bytes(vec![]),
 			r#type: TypeLegacy,
-		};
+		}
+		.into();
 
 		let dummy_signed_payload = tx.dummy_signed_payload();
-		let tx: TransactionUnsigned = tx.into();
 		let payload = Account::default().sign_transaction(tx).signed_payload();
 		assert_eq!(dummy_signed_payload.len(), payload.len());
 	}
diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs
index 1cf8d984b68b..ed046cb4da44 100644
--- a/substrate/frame/revive/src/evm/api/rpc_types.rs
+++ b/substrate/frame/revive/src/evm/api/rpc_types.rs
@@ -19,6 +19,27 @@ use super::*;
 use alloc::vec::Vec;
 use sp_core::{H160, U256};
 
+impl From<BlockNumberOrTag> for BlockNumberOrTagOrHash {
+	fn from(b: BlockNumberOrTag) -> Self {
+		match b {
+			BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::U256(n),
+			BlockNumberOrTag::BlockTag(t) => BlockNumberOrTagOrHash::BlockTag(t),
+		}
+	}
+}
+
+impl From<TransactionSigned> for TransactionUnsigned {
+	fn from(tx: TransactionSigned) -> Self {
+		use TransactionSigned::*;
+		match tx {
+			Transaction4844Signed(tx) => tx.transaction_4844_unsigned.into(),
+			Transaction1559Signed(tx) => tx.transaction_1559_unsigned.into(),
+			Transaction2930Signed(tx) => tx.transaction_2930_unsigned.into(),
+			TransactionLegacySigned(tx) => tx.transaction_legacy_unsigned.into(),
+		}
+	}
+}
+
 impl TransactionInfo {
 	/// Create a new [`TransactionInfo`] from a receipt and a signed transaction.
 	pub fn new(receipt: ReceiptInfo, transaction_signed: TransactionSigned) -> Self {
@@ -143,76 +164,69 @@ fn logs_bloom_works() {
 impl GenericTransaction {
 	/// Create a new [`GenericTransaction`] from a signed transaction.
 	pub fn from_signed(tx: TransactionSigned, from: Option<H160>) -> Self {
-		use TransactionSigned::*;
+		Self::from_unsigned(tx.into(), from)
+	}
+
+	/// Create a new [`GenericTransaction`] from an unsigned transaction.
+	pub fn from_unsigned(tx: TransactionUnsigned, from: Option<H160>) -> Self {
+		use TransactionUnsigned::*;
 		match tx {
-			TransactionLegacySigned(tx) => {
-				let tx = tx.transaction_legacy_unsigned;
-				GenericTransaction {
-					from,
-					r#type: Some(tx.r#type.as_byte()),
-					chain_id: tx.chain_id,
-					input: Some(tx.input),
-					nonce: Some(tx.nonce),
-					value: Some(tx.value),
-					to: tx.to,
-					gas: Some(tx.gas),
-					gas_price: Some(tx.gas_price),
-					..Default::default()
-				}
+			TransactionLegacyUnsigned(tx) => GenericTransaction {
+				from,
+				r#type: Some(tx.r#type.as_byte()),
+				chain_id: tx.chain_id,
+				input: Some(tx.input),
+				nonce: Some(tx.nonce),
+				value: Some(tx.value),
+				to: tx.to,
+				gas: Some(tx.gas),
+				gas_price: Some(tx.gas_price),
+				..Default::default()
 			},
-			Transaction4844Signed(tx) => {
-				let tx = tx.transaction_4844_unsigned;
-				GenericTransaction {
-					from,
-					r#type: Some(tx.r#type.as_byte()),
-					chain_id: Some(tx.chain_id),
-					input: Some(tx.input),
-					nonce: Some(tx.nonce),
-					value: Some(tx.value),
-					to: Some(tx.to),
-					gas: Some(tx.gas),
-					gas_price: Some(tx.max_fee_per_blob_gas),
-					access_list: Some(tx.access_list),
-					blob_versioned_hashes: Some(tx.blob_versioned_hashes),
-					max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas),
-					max_fee_per_gas: Some(tx.max_fee_per_gas),
-					max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas),
-					..Default::default()
-				}
+			Transaction4844Unsigned(tx) => GenericTransaction {
+				from,
+				r#type: Some(tx.r#type.as_byte()),
+				chain_id: Some(tx.chain_id),
+				input: Some(tx.input),
+				nonce: Some(tx.nonce),
+				value: Some(tx.value),
+				to: Some(tx.to),
+				gas: Some(tx.gas),
+				gas_price: Some(tx.max_fee_per_blob_gas),
+				access_list: Some(tx.access_list),
+				blob_versioned_hashes: tx.blob_versioned_hashes,
+				max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas),
+				max_fee_per_gas: Some(tx.max_fee_per_gas),
+				max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas),
+				..Default::default()
 			},
-			Transaction1559Signed(tx) => {
-				let tx = tx.transaction_1559_unsigned;
-				GenericTransaction {
-					from,
-					r#type: Some(tx.r#type.as_byte()),
-					chain_id: Some(tx.chain_id),
-					input: Some(tx.input),
-					nonce: Some(tx.nonce),
-					value: Some(tx.value),
-					to: tx.to,
-					gas: Some(tx.gas),
-					gas_price: Some(tx.gas_price),
-					access_list: Some(tx.access_list),
-					max_fee_per_gas: Some(tx.max_fee_per_gas),
-					max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas),
-					..Default::default()
-				}
+			Transaction1559Unsigned(tx) => GenericTransaction {
+				from,
+				r#type: Some(tx.r#type.as_byte()),
+				chain_id: Some(tx.chain_id),
+				input: Some(tx.input),
+				nonce: Some(tx.nonce),
+				value: Some(tx.value),
+				to: tx.to,
+				gas: Some(tx.gas),
+				gas_price: Some(tx.gas_price),
+				access_list: Some(tx.access_list),
+				max_fee_per_gas: Some(tx.max_fee_per_gas),
+				max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas),
+				..Default::default()
 			},
-			Transaction2930Signed(tx) => {
-				let tx = tx.transaction_2930_unsigned;
-				GenericTransaction {
-					from,
-					r#type: Some(tx.r#type.as_byte()),
-					chain_id: Some(tx.chain_id),
-					input: Some(tx.input),
-					nonce: Some(tx.nonce),
-					value: Some(tx.value),
-					to: tx.to,
-					gas: Some(tx.gas),
-					gas_price: Some(tx.gas_price),
-					access_list: Some(tx.access_list),
-					..Default::default()
-				}
+			Transaction2930Unsigned(tx) => GenericTransaction {
+				from,
+				r#type: Some(tx.r#type.as_byte()),
+				chain_id: Some(tx.chain_id),
+				input: Some(tx.input),
+				nonce: Some(tx.nonce),
+				value: Some(tx.value),
+				to: tx.to,
+				gas: Some(tx.gas),
+				gas_price: Some(tx.gas_price),
+				access_list: Some(tx.access_list),
+				..Default::default()
 			},
 		}
 	}
@@ -269,7 +283,7 @@ impl GenericTransaction {
 				max_fee_per_blob_gas: self.max_fee_per_blob_gas.unwrap_or_default(),
 				max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(),
 				access_list: self.access_list.unwrap_or_default(),
-				blob_versioned_hashes: self.blob_versioned_hashes.unwrap_or_default(),
+				blob_versioned_hashes: self.blob_versioned_hashes,
 			}
 			.into()),
 			_ => Err(()),
diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
index 5037ec05d881..1d65fdefdde6 100644
--- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
+++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
@@ -94,8 +94,8 @@ pub struct Block {
 	/// Uncles
 	pub uncles: Vec<H256>,
 	/// Withdrawals
-	#[serde(skip_serializing_if = "Option::is_none")]
-	pub withdrawals: Option<Vec<Withdrawal>>,
+	#[serde(default, skip_serializing_if = "Vec::is_empty")]
+	pub withdrawals: Vec<Withdrawal>,
 	/// Withdrawals root
 	#[serde(rename = "withdrawalsRoot", skip_serializing_if = "Option::is_none")]
 	pub withdrawals_root: Option<H256>,
@@ -114,7 +114,7 @@ pub enum BlockNumberOrTag {
 }
 impl Default for BlockNumberOrTag {
 	fn default() -> Self {
-		BlockNumberOrTag::U256(Default::default())
+		BlockNumberOrTag::BlockTag(Default::default())
 	}
 }
 
@@ -133,7 +133,7 @@ pub enum BlockNumberOrTagOrHash {
 }
 impl Default for BlockNumberOrTagOrHash {
 	fn default() -> Self {
-		BlockNumberOrTagOrHash::U256(Default::default())
+		BlockNumberOrTagOrHash::BlockTag(Default::default())
 	}
 }
 
@@ -148,12 +148,12 @@ pub struct GenericTransaction {
 	pub access_list: Option<AccessList>,
 	/// blobVersionedHashes
 	/// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs.
-	#[serde(rename = "blobVersionedHashes", skip_serializing_if = "Option::is_none")]
-	pub blob_versioned_hashes: Option<Vec<H256>>,
+	#[serde(rename = "blobVersionedHashes", default, skip_serializing_if = "Vec::is_empty")]
+	pub blob_versioned_hashes: Vec<H256>,
 	/// blobs
 	/// Raw blob data.
-	#[serde(skip_serializing_if = "Option::is_none")]
-	pub blobs: Option<Vec<Bytes>>,
+	#[serde(default, skip_serializing_if = "Vec::is_empty")]
+	pub blobs: Vec<Bytes>,
 	/// chainId
 	/// Chain ID that this transaction is valid on.
 	#[serde(rename = "chainId", skip_serializing_if = "Option::is_none")]
 	pub chain_id: Option<U256>,
@@ -319,7 +319,7 @@ pub enum TransactionUnsigned {
 }
 impl Default for TransactionUnsigned {
 	fn default() -> Self {
-		TransactionUnsigned::Transaction4844Unsigned(Default::default())
+		TransactionUnsigned::TransactionLegacyUnsigned(Default::default())
 	}
 }
 
@@ -341,13 +341,13 @@ pub type AccessList = Vec<AccessListEntry>;
 )]
 pub enum BlockTag {
 	#[serde(rename = "earliest")]
-	#[default]
 	Earliest,
 	#[serde(rename = "finalized")]
 	Finalized,
 	#[serde(rename = "safe")]
 	Safe,
 	#[serde(rename = "latest")]
+	#[default]
 	Latest,
 	#[serde(rename = "pending")]
 	Pending,
@@ -392,7 +392,7 @@ pub struct Log {
 	#[serde(skip_serializing_if = "Option::is_none")]
 	pub removed: Option<bool>,
 	/// topics
-	#[serde(skip_serializing_if = "Vec::is_empty")]
+	#[serde(default, skip_serializing_if = "Vec::is_empty")]
 	pub topics: Vec<H256>,
 	/// transaction hash
 	#[serde(rename = "transactionHash")]
@@ -574,7 +574,7 @@ pub enum TransactionSigned {
 }
 impl Default for TransactionSigned {
 	fn default() -> Self {
-		TransactionSigned::Transaction4844Signed(Default::default())
+		TransactionSigned::TransactionLegacySigned(Default::default())
 	}
 }
 
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index b5dc9a36065b..24b75de83569 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -455,236 +455,265 @@ mod test {
 
 	/// A builder for creating an unchecked extrinsic, and test that the check function works.
 	#[derive(Clone)]
 	struct UncheckedExtrinsicBuilder {
-		tx: TransactionLegacyUnsigned,
+		tx: GenericTransaction,
 		gas_limit: Weight,
 		storage_deposit_limit: BalanceOf<Test>,
+		before_validate: Option<std::sync::Arc<dyn Fn() + Send + Sync>>,
 	}
 
 	impl UncheckedExtrinsicBuilder {
 		/// Create a new builder with default values.
 		fn new() -> Self {
 			Self {
-				tx: TransactionLegacyUnsigned {
+				tx: GenericTransaction {
+					from: Some(Account::default().address()),
 					chain_id: Some(<Test as crate::Config>::ChainId::get().into()),
-					gas_price: U256::from(GAS_PRICE),
+					gas_price: Some(U256::from(GAS_PRICE)),
 					..Default::default()
 				},
 				gas_limit: Weight::zero(),
 				storage_deposit_limit: 0,
+				before_validate: None,
 			}
 		}
 
 		fn estimate_gas(&mut self) {
-			let dry_run = crate::Pallet::<Test>::bare_eth_transact(
-				Account::default().substrate_account(),
-				self.tx.to,
-				self.tx.value.try_into().unwrap(),
-				self.tx.input.clone().0,
-				Weight::MAX,
-				u64::MAX,
-				|call| {
+			let dry_run =
+				crate::Pallet::<Test>::bare_eth_transact(self.tx.clone(), Weight::MAX, |call| {
 					let call = RuntimeCall::Contracts(call);
 					let uxt: Ex = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into();
 					uxt.encoded_size() as u32
+				});
+
+			match dry_run {
+				Ok(dry_run) => {
+					log::debug!(target: LOG_TARGET, "Estimated gas: {:?}", dry_run.eth_gas);
+					self.tx.gas = Some(dry_run.eth_gas);
+				},
+				Err(err) => {
+					log::debug!(target: LOG_TARGET, "Failed to estimate gas: {:?}", err);
 				},
-				crate::DebugInfo::Skip,
-				crate::CollectEvents::Skip,
-			);
-			self.tx.gas = ((dry_run.fee + GAS_PRICE as u64) / (GAS_PRICE as u64)).into();
+			}
 		}
 
 		/// Create a new builder with a call to the given address.
 		fn call_with(dest: H160) -> Self {
 			let mut builder = Self::new();
 			builder.tx.to = Some(dest);
-			builder.estimate_gas();
+			ExtBuilder::default().build().execute_with(|| builder.estimate_gas());
 			builder
 		}
 
 		/// Create a new builder with an instantiate call.
 		fn instantiate_with(code: Vec<u8>, data: Vec<u8>) -> Self {
 			let mut builder = Self::new();
-			builder.tx.input = Bytes(code.into_iter().chain(data.into_iter()).collect());
-			builder.estimate_gas();
+			builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect()));
+			ExtBuilder::default().build().execute_with(|| builder.estimate_gas());
 			builder
 		}
 
 		/// Update the transaction with the given function.
-		fn update(mut self, f: impl FnOnce(&mut TransactionLegacyUnsigned) -> ()) -> Self {
+		fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self {
 			f(&mut self.tx);
 			self
 		}
+		/// Set the `before_validate` callback.
+		fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self {
+			self.before_validate = Some(std::sync::Arc::new(f));
+			self
+		}
 
 		/// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension.
 		fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> {
-			let UncheckedExtrinsicBuilder { tx, gas_limit, storage_deposit_limit } = self.clone();
-
-			// Fund the account.
-			let account = Account::default();
-			let _ = <Test as crate::Config>::Currency::set_balance(
-				&account.substrate_account(),
-				100_000_000_000_000,
-			);
-
-			let payload = account.sign_transaction(tx.into()).signed_payload();
-			let call = RuntimeCall::Contracts(crate::Call::eth_transact {
-				payload,
-				gas_limit,
-				storage_deposit_limit,
-			});
-
-			let encoded_len = call.encoded_size();
-			let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into();
-			let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?;
-			let (account_id, extra): (AccountId32, SignedExtra) = match result.format {
-				ExtrinsicFormat::Signed(signer, extra) => (signer, extra),
-				_ => unreachable!(),
-			};
-
-			extra.clone().validate_and_prepare(
-				RuntimeOrigin::signed(account_id),
-				&result.function,
-				&result.function.get_dispatch_info(),
-				encoded_len,
-				0,
-			)?;
+			ExtBuilder::default().build().execute_with(|| {
+				let UncheckedExtrinsicBuilder {
+					tx,
+					gas_limit,
+					storage_deposit_limit,
+					before_validate,
+				} = self.clone();
+
+				// Fund the account.
+				let account = Account::default();
+				let _ = <Test as crate::Config>::Currency::set_balance(
+					&account.substrate_account(),
+					100_000_000_000_000,
+				);
+
+				let payload =
+					account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload();
+				let call = RuntimeCall::Contracts(crate::Call::eth_transact {
+					payload,
+					gas_limit,
+					storage_deposit_limit,
+				});
+
+				let encoded_len = call.encoded_size();
+				let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into();
+				let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?;
+				let (account_id, extra): (AccountId32, SignedExtra) = match result.format {
+					ExtrinsicFormat::Signed(signer, extra) => (signer, extra),
+					_ => unreachable!(),
+				};
 
-			Ok((result.function, extra))
+				before_validate.map(|f| f());
+				extra.clone().validate_and_prepare(
+					RuntimeOrigin::signed(account_id),
+					&result.function,
+					&result.function.get_dispatch_info(),
+					encoded_len,
+					0,
+				)?;
+
+				Ok((result.function, extra))
+			})
 		}
 	}
 
 	#[test]
 	fn check_eth_transact_call_works() {
-		ExtBuilder::default().build().execute_with(|| {
-			let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
-			assert_eq!(
-				builder.check().unwrap().0,
-				crate::Call::call::<Test> {
-					dest: builder.tx.to.unwrap(),
-					value: builder.tx.value.as_u64(),
-					gas_limit: builder.gas_limit,
-					storage_deposit_limit: builder.storage_deposit_limit,
-					data: builder.tx.input.0
-				}
-				.into()
-			);
-		});
+		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
+		assert_eq!(
+			builder.check().unwrap().0,
+			crate::Call::call::<Test> {
+				dest: builder.tx.to.unwrap(),
+				value: builder.tx.value.unwrap_or_default().as_u64(),
+				gas_limit: builder.gas_limit,
+				storage_deposit_limit: builder.storage_deposit_limit,
+				data: builder.tx.input.unwrap_or_default().0
+			}
+			.into()
+		);
 	}
 
 	#[test]
 	fn check_eth_transact_instantiate_works() {
-		ExtBuilder::default().build().execute_with(|| {
-			let (code, _) = compile_module("dummy").unwrap();
-			let data = vec![];
-			let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone());
-
-			assert_eq!(
-				builder.check().unwrap().0,
-				crate::Call::instantiate_with_code::<Test> {
-					value: builder.tx.value.as_u64(),
-					gas_limit: builder.gas_limit,
-					storage_deposit_limit: builder.storage_deposit_limit,
-					code,
-					data,
-					salt: None
-				}
-				.into()
-			);
-		});
+		let (code, _) = compile_module("dummy").unwrap();
+		let data = vec![];
+		let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone());
+
+		assert_eq!(
+			builder.check().unwrap().0,
+			crate::Call::instantiate_with_code::<Test> {
+				value: builder.tx.value.unwrap_or_default().as_u64(),
+				gas_limit: builder.gas_limit,
+				storage_deposit_limit: builder.storage_deposit_limit,
+				code,
+				data,
+				salt: None
+			}
+			.into()
+		);
 	}
 
 	#[test]
 	fn check_eth_transact_nonce_works() {
-		ExtBuilder::default().build().execute_with(|| {
-			let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
-				.update(|tx| tx.nonce = 1u32.into());
-
-			assert_eq!(
-				builder.check(),
+		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
+			.update(|tx| tx.nonce = Some(1u32.into()));
+
+		assert_eq!(
+			builder.check(),
Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) + ); + + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).before_validate(|| { + >::inc_account_nonce(Account::default().substrate_account()); + }); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + ); } #[test] fn check_eth_transact_chain_id_works() { - ExtBuilder::default().build().execute_with(|| { - let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) - .update(|tx| tx.chain_id = Some(42.into())); - - assert_eq!( - builder.check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.chain_id = Some(42.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); } #[test] fn check_instantiate_data() { - ExtBuilder::default().build().execute_with(|| { - let code = b"invalid code".to_vec(); - let data = vec![1]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); - - // Fail because the tx input fail to get the blob length - assert_eq!( - builder.clone().update(|tx| tx.input = Bytes(vec![1, 2, 3])).check(), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); + let code = b"invalid code".to_vec(); + let data = vec![1]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + // Fail because the tx input fail to get the blob length + assert_eq!( + builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); } #[test] fn check_transaction_fees() { - ExtBuilder::default().build().execute_with(|| { - let scenarios: [(_, Box, _); 5] = [ - ("Eth fees too low", Box::new(|tx| tx.gas_price /= 2), InvalidTransaction::Payment), - ("Gas fees too high", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), - ("Gas fees too low", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), - ( - "Diff > 10%", - Box::new(|tx| tx.gas = tx.gas * 111 / 100), - InvalidTransaction::Call, - ), - ( - "Diff < 10%", - Box::new(|tx| { - tx.gas_price *= 2; - tx.gas = tx.gas * 89 / 100 - }), - InvalidTransaction::Call, - ), - ]; - - for (msg, update_tx, err) in scenarios { - let builder = - UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); - - assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); - } - }); + let scenarios: [(_, Box, _); 5] = [ + ( + "Eth fees too low", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() / 2); + }), + InvalidTransaction::Payment, + ), + ( + "Gas fees too high", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Gas fees too low", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Diff > 10%", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 111 / 100); + }), + InvalidTransaction::Call, + ), + ( + "Diff < 10%", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 2); + tx.gas = Some(tx.gas.unwrap() * 89 / 100); + }), + InvalidTransaction::Call, + ), + ]; + + for (msg, update_tx, err) in scenarios { + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + + assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + } 
} #[test] fn check_transaction_tip() { - ExtBuilder::default().build().execute_with(|| { - let (code, _) = compile_module("dummy").unwrap(); - let data = vec![]; - let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) - .update(|tx| tx.gas_price = tx.gas_price * 103 / 100); - - let tx = &builder.tx; - let expected_tip = tx.gas_price * tx.gas - U256::from(GAS_PRICE) * tx.gas; - let (_, extra) = builder.check().unwrap(); - assert_eq!(U256::from(extra.1.tip()), expected_tip); - }); + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .update(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); + log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); + }); + + let tx = &builder.tx; + let expected_tip = + tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap(); + let (_, extra) = builder.check().unwrap(); + assert_eq!(U256::from(extra.1.tip()), expected_tip); } } diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 49c08166483e..b23d7e4e60ef 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -562,6 +562,9 @@ pub struct Stack<'a, T: Config, E> { debug_message: Option<&'a mut DebugBuffer>, /// Transient storage used to store data, which is kept for the duration of a transaction. transient_storage: TransientStorage, + /// Whether or not actual transfer of funds should be performed. + /// This is set to `true` exclusively when we simulate a call through eth_transact. + skip_transfer: bool, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -777,6 +780,7 @@ where storage_meter: &'a mut storage::meter::Meter, value: U256, input_data: Vec, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> ExecResult { let dest = T::AddressMapper::to_account_id(&dest); @@ -786,6 +790,7 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? { stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output) @@ -812,6 +817,7 @@ where value: U256, input_data: Vec, salt: Option<&[u8; 32]>, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result<(H160, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( @@ -825,6 +831,7 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? .expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE); @@ -853,6 +860,7 @@ where gas_meter, storage_meter, value.into(), + false, debug_message, ) .unwrap() @@ -869,6 +877,7 @@ where gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, value: U256, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result, ExecError> { origin.ensure_mapped()?; @@ -896,6 +905,7 @@ where frames: Default::default(), debug_message, transient_storage: TransientStorage::new(limits::TRANSIENT_STORAGE_BYTES), + skip_transfer, _phantom: Default::default(), }; @@ -1073,6 +1083,7 @@ where &frame.account_id, frame.contract_info.get(&frame.account_id), executable.code_info(), + self.skip_transfer, )?; // Needs to be incremented before calling into the code so that it is visible // in case of recursion. 
@@ -2101,6 +2112,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ), Ok(_) @@ -2193,6 +2205,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ) .unwrap(); @@ -2233,6 +2246,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, )); @@ -2269,6 +2283,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ), ExecError { @@ -2286,6 +2301,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -2314,6 +2330,7 @@ mod tests { &mut storage_meter, 55u64.into(), vec![], + false, None, ) .unwrap(); @@ -2363,6 +2380,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2392,6 +2410,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2421,6 +2440,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![1, 2, 3, 4], + false, None, ); assert_matches!(result, Ok(_)); @@ -2457,6 +2477,7 @@ mod tests { min_balance.into(), vec![1, 2, 3, 4], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -2511,6 +2532,7 @@ mod tests { &mut storage_meter, value.into(), vec![], + false, None, ); @@ -2575,6 +2597,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2640,6 +2663,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2672,6 +2696,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -2709,6 +2734,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2735,6 +2761,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2779,6 +2806,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2805,6 +2833,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2831,6 +2860,7 @@ mod tests { &mut storage_meter, 1u64.into(), vec![0], + false, None, ); assert_matches!(result, Err(_)); @@ -2875,6 +2905,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2920,6 +2951,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); @@ -2946,6 +2978,7 @@ mod tests { U256::zero(), // <- zero value vec![], Some(&[0; 32]), + false, None, ), Err(_) @@ -2981,6 +3014,7 @@ mod tests { min_balance.into(), vec![], Some(&[0 ;32]), + false, None, ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address @@ -3032,10 +3066,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance.into(), vec![], Some(&[0; 32]), + false, None, ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address @@ -3100,6 +3134,7 @@ mod tests { &mut storage_meter, (min_balance * 10).into(), vec![], + false, None, ), Ok(_) @@ -3180,6 +3215,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ), Ok(_) @@ -3223,6 +3259,7 @@ mod tests { 100u64.into(), vec![], Some(&[0; 32]), + false, None, ), Err(Error::::TerminatedInConstructor.into()) @@ -3287,6 +3324,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -3349,6 +3387,7 @@ mod tests { 10u64.into(), vec![], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -3395,6 +3434,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ 
-3426,6 +3466,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buffer), ) .unwrap(); @@ -3459,6 +3500,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buffer), ); assert!(result.is_err()); @@ -3492,6 +3534,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, Some(&mut debug_buf_after), ) .unwrap(); @@ -3525,6 +3568,7 @@ mod tests { &mut storage_meter, U256::zero(), CHARLIE_ADDR.as_bytes().to_vec(), + false, None, )); @@ -3537,6 +3581,7 @@ mod tests { &mut storage_meter, U256::zero(), BOB_ADDR.as_bytes().to_vec(), + false, None, ) .map_err(|e| e.error), @@ -3587,6 +3632,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ) .map_err(|e| e.error), @@ -3621,6 +3667,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3705,6 +3752,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3831,6 +3879,7 @@ mod tests { (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, ) .ok(); @@ -3844,6 +3893,7 @@ mod tests { (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 1); @@ -3856,6 +3906,7 @@ mod tests { (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 2); @@ -3868,6 +3919,7 @@ mod tests { (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 3); @@ -3936,6 +3988,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4047,6 +4100,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4086,6 +4140,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4125,6 +4180,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4178,6 +4234,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4234,6 +4291,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4309,6 +4367,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4379,6 +4438,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4417,6 +4477,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, )); }); @@ -4479,6 +4540,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4512,6 +4574,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -4595,6 +4658,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4663,6 +4727,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4734,6 +4799,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); @@ -4785,6 +4851,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4854,6 +4921,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4900,6 +4968,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4944,6 +5013,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![], + false, None, ) .unwrap() @@ -4999,6 +5069,7 @@ mod tests { &mut storage_meter, U256::zero(), vec![0], + 
false, None, ), Ok(_) diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index b55854e2eec5..1dee1da03bc4 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -41,13 +41,13 @@ pub mod test_utils; pub mod weights; use crate::{ - evm::{runtime::GAS_PRICE, TransactionLegacyUnsigned}, + evm::{runtime::GAS_PRICE, GenericTransaction}, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, RuntimeCosts, WasmBlob}, }; -use alloc::boxed::Box; +use alloc::{boxed::Box, format, vec}; use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ @@ -74,7 +74,7 @@ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_core::{H160, H256, U256}; use sp_runtime::{ - traits::{BadOrigin, Convert, Dispatchable, Saturating, Zero}, + traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero}, DispatchError, }; @@ -823,7 +823,7 @@ pub mod pallet { dest, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), data, DebugInfo::Skip, CollectEvents::Skip, @@ -859,7 +859,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Existing(code_hash), data, salt, @@ -925,7 +925,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Upload(code), data, salt, @@ -1083,7 +1083,7 @@ fn dispatch_result( impl Pallet where - BalanceOf: Into + TryFrom, + BalanceOf: Into + TryFrom + Bounded, MomentOf: Into, T::Hash: frame_support::traits::IsType, { @@ -1098,7 +1098,7 @@ where dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -1112,7 +1112,10 @@ where }; let try_call = || { let origin = Origin::from_runtime_origin(origin)?; - let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; + let mut storage_meter = match storage_deposit_limit { + DepositLimit::Balance(limit) => StorageMeter::new(&origin, limit, value)?, + DepositLimit::Unchecked => StorageMeter::new_unchecked(BalanceOf::::max_value()), + }; let result = ExecStack::>::run_call( origin.clone(), dest, @@ -1120,9 +1123,14 @@ where &mut storage_meter, Self::convert_native_to_evm(value), data, + storage_deposit_limit.is_unchecked(), debug_message.as_mut(), )?; - storage_deposit = storage_meter.try_into_deposit(&origin)?; + storage_deposit = storage_meter + .try_into_deposit(&origin, storage_deposit_limit.is_unchecked()) + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); + })?; Ok(result) }; let result = Self::run_guarded(try_call); @@ -1151,7 +1159,7 @@ where origin: OriginFor, value: BalanceOf, gas_limit: Weight, - mut storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -1162,13 +1170,24 @@ where let mut storage_deposit = Default::default(); let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; + + let unchecked_deposit_limit = storage_deposit_limit.is_unchecked(); + let mut storage_deposit_limit = match storage_deposit_limit { + DepositLimit::Balance(limit) => limit, + DepositLimit::Unchecked => 
BalanceOf::::max_value(), + }; + let try_instantiate = || { let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { Code::Upload(code) => { let upload_account = T::UploadOrigin::ensure_origin(origin)?; - let (executable, upload_deposit) = - Self::try_upload_code(upload_account, code, storage_deposit_limit)?; + let (executable, upload_deposit) = Self::try_upload_code( + upload_account, + code, + storage_deposit_limit, + unchecked_deposit_limit, + )?; storage_deposit_limit.saturating_reduce(upload_deposit); (executable, upload_deposit) }, @@ -1176,8 +1195,12 @@ where (WasmBlob::from_storage(code_hash, &mut gas_meter)?, Default::default()), }; let instantiate_origin = Origin::from_account_id(instantiate_account.clone()); - let mut storage_meter = - StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)?; + let mut storage_meter = if unchecked_deposit_limit { + StorageMeter::new_unchecked(storage_deposit_limit) + } else { + StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)? + }; + let result = ExecStack::>::run_instantiate( instantiate_account, executable, @@ -1186,10 +1209,11 @@ where Self::convert_native_to_evm(value), data, salt.as_ref(), + unchecked_deposit_limit, debug_message.as_mut(), ); storage_deposit = storage_meter - .try_into_deposit(&instantiate_origin)? + .try_into_deposit(&instantiate_origin, unchecked_deposit_limit)? .saturating_add(&StorageDeposit::Charge(upload_deposit)); result }; @@ -1215,28 +1239,15 @@ where /// /// # Parameters /// - /// - `origin`: The origin of the call. - /// - `dest`: The destination address of the call. - /// - `value`: The EVM value to transfer. - /// - `input`: The input data. + /// - `tx`: The Ethereum transaction to simulate. /// - `gas_limit`: The gas limit enforced during contract execution. - /// - `storage_deposit_limit`: The maximum balance that can be charged to the caller for storage - /// usage. /// - `utx_encoded_size`: A function that takes a call and returns the encoded size of the /// unchecked extrinsic. - /// - `debug`: Debugging configuration. - /// - `collect_events`: Event collection configuration. pub fn bare_eth_transact( - origin: T::AccountId, - dest: Option, - value: U256, - input: Vec, + mut tx: GenericTransaction, gas_limit: Weight, - storage_deposit_limit: BalanceOf, utx_encoded_size: impl Fn(Call) -> u32, - debug: DebugInfo, - collect_events: CollectEvents, - ) -> EthContractResult> + ) -> Result>, EthTransactError> where T: pallet_transaction_payment::Config, ::RuntimeCall: @@ -1247,26 +1258,58 @@ where T::Nonce: Into, T::Hash: frame_support::traits::IsType, { - log::debug!(target: LOG_TARGET, "bare_eth_transact: dest: {dest:?} value: {value:?} - gas_limit: {gas_limit:?} storage_deposit_limit: {storage_deposit_limit:?}"); + log::debug!(target: LOG_TARGET, "bare_eth_transact: tx: {tx:?} gas_limit: {gas_limit:?}"); + + let from = tx.from.unwrap_or_default(); + let origin = T::AddressMapper::to_account_id(&from); - // Get the nonce to encode in the tx. - let nonce: T::Nonce = >::account_nonce(&origin); + let storage_deposit_limit = if tx.gas.is_some() { + DepositLimit::Balance(BalanceOf::::max_value()) + } else { + DepositLimit::Unchecked + }; + + // TODO remove once we have revisited how we encode the gas limit. 
+ if tx.nonce.is_none() { + tx.nonce = Some(>::account_nonce(&origin).into()); + } + if tx.gas_price.is_none() { + tx.gas_price = Some(GAS_PRICE.into()); + } + if tx.chain_id.is_none() { + tx.chain_id = Some(T::ChainId::get().into()); + } // Convert the value to the native balance type. - let native_value = match Self::convert_evm_to_native(value) { + let evm_value = tx.value.unwrap_or_default(); + let native_value = match Self::convert_evm_to_native(evm_value) { Ok(v) => v, - Err(err) => - return EthContractResult { - gas_required: Default::default(), - storage_deposit: Default::default(), - fee: Default::default(), - result: Err(err.into()), - }, + Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())), + }; + + let input = tx.input.clone().unwrap_or_default().0; + let debug = DebugInfo::Skip; + let collect_events = CollectEvents::Skip; + + let extract_error = |err| { + if err == Error::::TransferFailed.into() || + err == Error::::StorageDepositNotEnoughFunds.into() || + err == Error::::StorageDepositLimitExhausted.into() + { + let balance = Self::evm_balance(&from); + return Err(EthTransactError::Message( + format!("insufficient funds for gas * price + value: address {from:?} have {balance} (supplied gas {})", + tx.gas.unwrap_or_default())) + ); + } + + return Err(EthTransactError::Message(format!( + "Failed to instantiate contract: {err:?}" + ))); }; // Dry run the call - let (mut result, dispatch_info) = match dest { + let (mut result, dispatch_info) = match tx.to { // A contract call. Some(dest) => { // Dry run the call. @@ -1281,11 +1324,24 @@ where collect_events, ); - let result = EthContractResult { + let data = match result.result { + Ok(return_value) => { + if return_value.did_revert() { + return Err(EthTransactError::Data(return_value.data)); + } + return_value.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to execute call: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { gas_required: result.gas_required, storage_deposit: result.storage_deposit.charge_or_zero(), - result: result.result, - fee: Default::default(), + data, + eth_gas: Default::default(), }; // Get the dispatch info of the call. let dispatch_call: ::RuntimeCall = crate::Call::::call { @@ -1326,11 +1382,24 @@ where collect_events, ); - let result = EthContractResult { + let returned_data = match result.result { + Ok(return_value) => { + if return_value.result.did_revert() { + return Err(EthTransactError::Data(return_value.result.data)); + } + return_value.result.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to instantiate: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { gas_required: result.gas_required, storage_deposit: result.storage_deposit.charge_or_zero(), - result: result.result.map(|v| v.result), - fee: Default::default(), + data: returned_data, + eth_gas: Default::default(), }; // Get the dispatch info of the call. @@ -1348,23 +1417,18 @@ where }, }; - let mut tx = TransactionLegacyUnsigned { - value, - input: input.into(), - nonce: nonce.into(), - chain_id: Some(T::ChainId::get().into()), - gas_price: GAS_PRICE.into(), - to: dest, - ..Default::default() - }; - // The transaction fees depend on the extrinsic's length, which in turn is influenced by // the encoded length of the gas limit specified in the transaction (tx.gas). // We iteratively compute the fee by adjusting tx.gas until the fee stabilizes. // with a maximum of 3 iterations to avoid an infinite loop. 
for _ in 0..3 { + let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else { + log::debug!(target: LOG_TARGET, "Failed to convert to unsigned"); + return Err(EthTransactError::Message("Invalid transaction".into())); + }; + let eth_dispatch_call = crate::Call::::eth_transact { - payload: tx.dummy_signed_payload(), + payload: unsigned_tx.dummy_signed_payload(), gas_limit: result.gas_required, storage_deposit_limit: result.storage_deposit, }; @@ -1375,17 +1439,18 @@ where 0u32.into(), ) .into(); + let eth_gas: U256 = (fee / GAS_PRICE.into()).into(); - if fee == result.fee { - log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} fee: {fee:?}"); + if eth_gas == result.eth_gas { + log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}"); break; } - result.fee = fee; - tx.gas = (fee / GAS_PRICE.into()).into(); - log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {:?}", tx.gas); + result.eth_gas = eth_gas; + tx.gas = Some(eth_gas.into()); + log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {eth_gas:?}"); } - result + Ok(result) } /// Get the balance with EVM decimals of the given `address`. @@ -1403,7 +1468,7 @@ where storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { let origin = T::UploadOrigin::ensure_origin(origin)?; - let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit)?; + let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, false)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) } @@ -1421,9 +1486,10 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: BalanceOf, + skip_transfer: bool, ) -> Result<(WasmBlob, BalanceOf), DispatchError> { let mut module = WasmBlob::from_code(code, origin)?; - let deposit = module.store_code()?; + let deposit = module.store_code(skip_transfer)?; ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); Ok((module, deposit)) } @@ -1527,14 +1593,7 @@ sp_api::decl_runtime_apis! { /// Perform an Ethereum call. /// /// See [`crate::Pallet::bare_eth_transact`] - fn eth_transact( - origin: H160, - dest: Option, - value: U256, - input: Vec, - gas_limit: Option, - storage_deposit_limit: Option, - ) -> EthContractResult; + fn eth_transact(tx: GenericTransaction) -> Result, EthTransactError>; /// Upload new code without instantiating a contract from it. /// diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 024b1f3448e1..a7127f812b4b 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -17,8 +17,8 @@ //! A crate that hosts a common definitions that are relevant for the pallet-revive. -use crate::H160; -use alloc::vec::Vec; +use crate::{H160, U256}; +use alloc::{string::String, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::weights::Weight; use pallet_revive_uapi::ReturnFlags; @@ -28,6 +28,30 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum DepositLimit { + /// Allows bypassing all balance transfer checks. + Unchecked, + + /// Specifies a maximum allowable balance for a deposit. 
+ Balance(Balance), +} + +impl DepositLimit { + pub fn is_unchecked(&self) -> bool { + match self { + Self::Unchecked => true, + _ => false, + } + } +} + +impl From for DepositLimit { + fn from(value: T) -> Self { + Self::Balance(value) + } +} + /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. /// @@ -84,15 +108,22 @@ pub struct ContractResult { /// The result of the execution of a `eth_transact` call. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct EthContractResult> { - /// The fee charged for the execution. - pub fee: Balance, +pub struct EthTransactInfo { /// The amount of gas that was necessary to execute the transaction. pub gas_required: Weight, /// Storage deposit charged. pub storage_deposit: Balance, - /// The execution result. - pub result: R, + /// The weight and deposit equivalent in EVM Gas. + pub eth_gas: U256, + /// The execution return value. + pub data: Vec, +} + +/// Error type of a `eth_transact` call. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum EthTransactError { + Data(Vec), + Message(String), } /// Result type of a `bare_code_upload` call. diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index 712010bc8257..6eddf048be98 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -373,24 +373,36 @@ where } } + /// Create new storage meter without checking the limit. + pub fn new_unchecked(limit: BalanceOf) -> Self { + return Self { limit, ..Default::default() } + } + /// The total amount of deposit that should change hands as result of the execution /// that this meter was passed into. This will also perform all the charges accumulated /// in the whole contract stack. /// /// This drops the root meter in order to make sure it is only called when the whole /// execution did finish. - pub fn try_into_deposit(self, origin: &Origin) -> Result, DispatchError> { - // Only refund or charge deposit if the origin is not root. - let origin = match origin { - Origin::Root => return Ok(Deposit::Charge(Zero::zero())), - Origin::Signed(o) => o, - }; - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + pub fn try_into_deposit( + self, + origin: &Origin, + skip_transfer: bool, + ) -> Result, DispatchError> { + if !skip_transfer { + // Only refund or charge deposit if the origin is not root. + let origin = match origin { + Origin::Root => return Ok(Deposit::Charge(Zero::zero())), + Origin::Signed(o) => o, + }; + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } } + Ok(self.total_deposit) } } @@ -425,13 +437,18 @@ impl> RawMeter { contract: &T::AccountId, contract_info: &mut ContractInfo, code_info: &CodeInfo, + skip_transfer: bool, ) -> Result<(), DispatchError> { debug_assert!(matches!(self.contract_state(), ContractState::Alive)); // We need to make sure that the contract's account exists. 
let ed = Pallet::::min_balance(); self.total_deposit = Deposit::Charge(ed); - T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; + if skip_transfer { + T::Currency::set_balance(contract, ed); + } else { + T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; + } // A consumer is added at account creation and removed it on termination, otherwise the // runtime could remove the account. As long as a contract exists its account must exist. @@ -479,6 +496,7 @@ impl> RawMeter { } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { + log::debug!( target: LOG_TARGET, "Storage deposit limit exhausted: {:?} > {:?}", amount, self.limit); return Err(>::StorageDepositLimitExhausted.into()) } } @@ -811,7 +829,10 @@ mod tests { nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); meter.absorb(nested0, &BOB, Some(&mut nested0_info)); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(nested0_info.extra_deposit(), 112); assert_eq!(nested1_info.extra_deposit(), 110); @@ -882,7 +903,10 @@ mod tests { nested0.absorb(nested1, &CHARLIE, None); meter.absorb(nested0, &BOB, None); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(TestExtTestValue::get(), test_case.expected) } } diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index e64f58894432..8ba5e7384070 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -18,7 +18,8 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult, - DebugInfo, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, + DebugInfo, DepositLimit, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, + Pallet, Weight, }; use frame_support::pallet_prelude::DispatchResultWithPostInfo; use paste::paste; @@ -133,7 +134,7 @@ builder!( origin: OriginFor, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, @@ -159,7 +160,7 @@ builder!( origin, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), code, data: vec![], salt: Some([0; 32]), @@ -198,7 +199,7 @@ builder!( dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, @@ -216,7 +217,7 @@ builder!( dest, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), data: vec![], debug: DebugInfo::UnsafeDebug, collect_events: CollectEvents::Skip, diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 34afe8aabfe6..1df300f031a7 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -1249,7 +1249,7 @@ fn transfer_expendable_cannot_kill_account() { test_utils::contract_info_storage_deposit(&addr) ); - // Some ot the total balance is held, so it can't 
be transferred. + // Some or the total balance is held, so it can't be transferred. assert_err!( <::Currency as Mutate>::transfer( &account, @@ -2290,7 +2290,7 @@ fn gas_estimation_for_subcalls() { // Make the same call using the estimated gas. Should succeed. let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) .data(input.clone()) .build(); assert_ok!(&result.result); @@ -2298,7 +2298,7 @@ fn gas_estimation_for_subcalls() { // Check that it fails with too little ref_time let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required.sub_ref_time(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) .data(input.clone()) .build(); assert_err!(result.result, error); @@ -2306,7 +2306,7 @@ fn gas_estimation_for_subcalls() { // Check that it fails with too little proof_size let result = builder::bare_call(addr_caller) .gas_limit(result_orig.gas_required.sub_proof_size(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) .data(input.clone()) .build(); assert_err!(result.result, error); @@ -3592,7 +3592,7 @@ fn deposit_limit_in_nested_instantiate() { // Set enough deposit limit for the child instantiate. This should succeed. let result = builder::bare_call(addr_caller) .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 4 + 2) + .storage_deposit_limit((callee_info_len + 2 + ED + 4 + 2).into()) .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3 + 2)).encode()) .build(); @@ -3879,7 +3879,7 @@ fn locking_delegate_dependency_works() { // Locking a dependency with a storage limit too low should fail. assert_err!( builder::bare_call(addr_caller) - .storage_deposit_limit(dependency_deposit - 1) + .storage_deposit_limit((dependency_deposit - 1).into()) .data((1u32, hash2addr(&callee_hashes[0]), callee_hashes[0]).encode()) .build() .result, diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs index 7c4fbba71f65..c9e19e52ace1 100644 --- a/substrate/frame/revive/src/tests/test_debug.rs +++ b/substrate/frame/revive/src/tests/test_debug.rs @@ -21,6 +21,7 @@ use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, primitives::ExecReturnValue, test_utils::*, + DepositLimit, }; use frame_support::traits::Currency; use pretty_assertions::assert_eq; @@ -114,7 +115,7 @@ fn debugging_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::(), + DepositLimit::Balance(deposit_limit::()), Code::Upload(wasm), vec![], Some([0u8; 32]), @@ -198,7 +199,7 @@ fn call_interception_works() { RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, - deposit_limit::(), + deposit_limit::().into(), Code::Upload(wasm), vec![], // some salt to ensure that the address of this contract is unique among all tests diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index d87ec7112286..54fb02c866e1 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -183,7 +183,7 @@ where } /// Puts the module blob into storage, and returns the deposit collected for the storage. 
- pub fn store_code(&mut self) -> Result, Error> { + pub fn store_code(&mut self, skip_transfer: bool) -> Result, Error> { let code_hash = *self.code_hash(); >::mutate(code_hash, |stored_code_info| { match stored_code_info { @@ -195,15 +195,16 @@ where // the `owner` is always the origin of the current transaction. None => { let deposit = self.code_info.deposit; - T::Currency::hold( + + if !skip_transfer { + T::Currency::hold( &HoldReason::CodeUploadDepositReserve.into(), &self.code_info.owner, deposit, - ) - .map_err(|err| { - log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); + ) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); >::StorageDepositNotEnoughFunds })?; + } self.code_info.refcount = 0; >::insert(code_hash, &self.code); From c0921339f9d486981b3681760ee83ba9237f2eaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 07:20:56 +0000 Subject: [PATCH 12/29] Bump the ci_dependencies group across 1 directory with 3 updates (#6516) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the ci_dependencies group with 3 updates in the / directory: [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action), [actions/attest-build-provenance](https://github.com/actions/attest-build-provenance) and [codecov/codecov-action](https://github.com/codecov/codecov-action). Updates `lycheeverse/lychee-action` from 2.0.2 to 2.1.0
Updates `actions/attest-build-provenance` from 1.4.3 to 1.4.4
Release notes

Sourced from actions/attest-build-provenance's releases.

v1.4.4

What's Changed

Full Changelog: https://github.com/actions/attest-build-provenance/compare/v1.4.3...v1.4.4

Commits
  • ef24412 bump predicate from 1.1.3 to 1.1.4 (#310)
  • 36fa7d0 bump @​actions/attest from 1.4.2 to 1.5.0 (#309)
  • 390c0bb Bump @​types/node from 22.8.1 to 22.8.7 in the npm-development group (#305)
  • 21da615 Bump the npm-development group with 3 updates (#299)
  • 0704961 Bump actions/publish-immutable-action in the actions-minor group (#298)
  • d01b070 Bump the npm-development group with 3 updates (#278)
  • b1d65e4 Add workflow file for publishing releases to immutable action package (#277)
  • 3a27694 Bump @​actions/core from 1.10.1 to 1.11.1 (#275)
  • dff1ae6 prevent e2e workflows on forks (#272)
  • e5892d0 Bump the npm-development group with 3 updates (#263)
  • Additional commits viewable in compare view

Updates `codecov/codecov-action` from 4 to 5
Release notes

Sourced from codecov/codecov-action's releases.

v5.0.0

v5 Release

v5 of the Codecov GitHub Action will use the Codecov Wrapper to encapsulate the CLI. This will help ensure that the Action gets updates more quickly.

Migration Guide

The v5 release also coincides with the opt-out feature for tokens for public repositories. In the Global Upload Token section of the settings page of an organization in codecov.io, you can set the ability for Codecov to receive coverage reports from any source. This will allow contributors or other members of a repository to upload without needing access to the Codecov token. For more details see how to upload without a token.

[!WARNING]
The following arguments have been changed

  • file (this has been deprecated in favor of files)
  • plugin (this has been deprecated in favor of plugins)

The following arguments have been added:

  • binary
  • gcov_args
  • gcov_executable
  • gcov_ignore
  • gcov_include
  • report_type
  • skip_validation
  • swift_project

You can see their usage in the action.yml file.

What's Changed

... (truncated)

Changelog

Sourced from codecov/codecov-action's changelog.

4.0.0-beta.2

Fixes

  • #1085 not adding -n if empty to do-upload command

4.0.0-beta.1

v4 represents a move from the universal uploader to the Codecov CLI. Although this will unlock new features for our users, the CLI is not yet at feature parity with the universal uploader.

Breaking Changes

  • No current support for aarch64 and alpine architectures.
  • Tokenless uploading is unsupported
  • Various arguments to the Action have been removed

3.1.4

Fixes

  • #967 Fix typo in README.md
  • #971 fix: add back in working dir
  • #969 fix: CLI option names for uploader

Dependencies

  • #970 build(deps-dev): bump @​types/node from 18.15.12 to 18.16.3
  • #979 build(deps-dev): bump @​types/node from 20.1.0 to 20.1.2
  • #981 build(deps-dev): bump @​types/node from 20.1.2 to 20.1.4

3.1.3

Fixes

  • #960 fix: allow for aarch64 build

Dependencies

  • #957 build(deps-dev): bump jest-junit from 15.0.0 to 16.0.0
  • #958 build(deps): bump openpgp from 5.7.0 to 5.8.0
  • #959 build(deps-dev): bump @​types/node from 18.15.10 to 18.15.12

3.1.2

Fixes

  • #718 Update README.md
  • #851 Remove unsupported path_to_write_report argument
  • #898 codeql-analysis.yml
  • #901 Update README to contain correct information - inputs and negate feature
  • #955 fix: add in all the extra arguments for uploader

Dependencies

  • #819 build(deps): bump openpgp from 5.4.0 to 5.5.0
  • #835 build(deps): bump node-fetch from 3.2.4 to 3.2.10
  • #840 build(deps): bump ossf/scorecard-action from 1.1.1 to 2.0.4
  • #841 build(deps): bump @​actions/core from 1.9.1 to 1.10.0
  • #843 build(deps): bump @​actions/github from 5.0.3 to 5.1.1
  • #869 build(deps): bump node-fetch from 3.2.10 to 3.3.0
  • #872 build(deps-dev): bump jest-junit from 13.2.0 to 15.0.0
  • #879 build(deps): bump decode-uri-component from 0.2.0 to 0.2.2

... (truncated)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
  • `@dependabot rebase` will rebase this PR
  • `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
  • `@dependabot merge` will merge this PR after your CI passes on it
  • `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
  • `@dependabot cancel merge` will cancel a previously requested merge and block automerging
  • `@dependabot reopen` will reopen this PR if it is closed
  • `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
  • `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
  • `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself)
  • `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself)
  • `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself)
  • `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency
  • `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-links.yml | 2 +- .github/workflows/release-reusable-rc-buid.yml | 6 +++--- .github/workflows/tests-linux-stable-coverage.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index dd9d3eaf824f..cea6b9a8636a 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -33,7 +33,7 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: Lychee link checker - uses: lycheeverse/lychee-action@7cd0af4c74a61395d455af97419279d86aafaede # for v1.9.1 (10. Jan 2024) + uses: lycheeverse/lychee-action@f81112d0d2814ded911bd23e3beaa9dda9093915 # for v1.9.1 (10. Jan 2024) with: args: >- --config .config/lychee.toml diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index 7e31a4744b59..f5240878cba2 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -104,7 +104,7 @@ jobs: ./.github/scripts/release/build-linux-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: /artifacts/${{ matrix.binaries }}/${{ matrix.binaries }} @@ -220,7 +220,7 @@ jobs: ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }} @@ -278,7 +278,7 @@ jobs: . "${GITHUB_WORKSPACE}"/.github/scripts/release/build-deb.sh ${{ inputs.package }} ${VERSION} - name: Generate artifact attestation - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: target/production/*.deb diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml index c5af6bcae77f..61e01cda4428 100644 --- a/.github/workflows/tests-linux-stable-coverage.yml +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -102,7 +102,7 @@ jobs: merge-multiple: true - run: ls -al reports/ - name: Upload to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true From 0845044454c005b577eab7afaea18583bd7e3dd3 Mon Sep 17 00:00:00 2001 From: clangenb <37865735+clangenb@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:07:21 +0100 Subject: [PATCH 13/29] migrate pallet-session-benchmarking to bench V2 syntax (#6294) Migrates pallet-session-benchmarking to bench V2 syntax. 
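For readers new to the two styles, here is a minimal sketch of the migration (the pallet items `do_something` and `Something` are hypothetical and not code from this patch; in a real crate only one of the two forms would exist):

```rust
use frame_benchmarking::v1::benchmarks;

// v1: a declarative macro. Setup, the benchmarked call (after `}:`), and the
// `verify` block are separate macro sections, and ranges use `let n in A .. B`.
benchmarks! {
    do_something {
        let n in 1 .. 100;
        let caller: T::AccountId = whitelisted_caller();
    }: _(RawOrigin::Signed(caller), n)
    verify {
        assert!(Something::<T>::get().is_some());
    }
}

use frame_benchmarking::v2::*;

// v2: a plain Rust module with attributes. Ranges become `Linear<A, B>`
// parameters, the dispatched call is marked `#[extrinsic_call]` (or
// `#[block]` for arbitrary code), and verification is ordinary code
// that follows the call.
#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn do_something(n: Linear<1, 100>) {
        let caller: T::AccountId = whitelisted_caller();

        #[extrinsic_call]
        _(RawOrigin::Signed(caller), n);

        assert!(Something::<T>::get().is_some());
    }
}
```

This is exactly the mechanical change applied to `set_keys`, `purge_keys`, and the membership-proof benchmarks in the diff below.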
Part of: * #6202 --------- Co-authored-by: Shawn Tabrizi Co-authored-by: Giuseppe Re --- .../frame/session/benchmarking/src/inner.rs | 68 ++++++++++++------- .../frame/session/benchmarking/src/mock.rs | 6 +- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index 9ba47b34ed7a..9789b6bb593d 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -22,7 +22,7 @@ use alloc::{vec, vec::Vec}; use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; use codec::Decode; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; @@ -45,8 +45,12 @@ impl OnInitialize> for Pallet { } } -benchmarks! { - set_keys { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -58,13 +62,19 @@ benchmarks! { let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller), keys, proof) - purge_keys { + #[extrinsic_call] + _(RawOrigin::Signed(v_controller), keys, proof); + + Ok(()) + } + + #[benchmark] + fn purge_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -75,30 +85,33 @@ benchmarks! { )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller)) - #[extra] - check_membership_proof_current_session { - let n in 2 .. MAX_VALIDATORS as u32; + #[extrinsic_call] + _(RawOrigin::Signed(v_controller)); + Ok(()) + } + + #[benchmark(extra)] + fn check_membership_proof_current_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - #[extra] - check_membership_proof_historical_session { - let n in 2 .. 
MAX_VALIDATORS as u32; - + #[benchmark(extra)] + fn check_membership_proof_historical_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); // skip to the next session so that the session is historical @@ -106,14 +119,21 @@ benchmarks! { Session::::rotate_session(); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Test, + extra = false + ); } /// Sets up the benchmark for checking a membership proof. It creates the given diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 2aec58cceded..346cd04c0fa9 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_runtime::{traits::IdentityLookup, BuildStorage}; +use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; type AccountId = u64; type Nonce = u32; @@ -42,6 +42,7 @@ frame_support::construct_runtime!( Balances: pallet_balances, Staking: pallet_staking, Session: pallet_session, + Historical: pallet_session::historical } ); @@ -79,7 +80,8 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} From 3d8da815ecd12b8f04daf87d6ffba5ec4a181806 Mon Sep 17 00:00:00 2001 From: clangenb <37865735+clangenb@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:36:52 +0100 Subject: [PATCH 14/29] migrate pallet-offences-benchmarking to benchmark v2 syntax (#6300) Migrates pallet-offences-benchmarking to benchmark v2 syntax. Part of: * #6202 --------- Co-authored-by: Giuseppe Re --- .../frame/offences/benchmarking/src/inner.rs | 107 +++++++++++------- .../frame/offences/benchmarking/src/mock.rs | 5 +- 2 files changed, 66 insertions(+), 46 deletions(-) diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index 573114de0742..75f3e9931e34 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -19,7 +19,7 @@ use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks}; +use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; @@ -144,7 +144,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' fn make_offenders( num_offenders: u32, num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { +) -> Result>, &'static str> { Staking::::new_session(0); let mut offenders = vec![]; @@ -167,21 +167,50 @@ fn make_offenders( .expect("failed to convert validator id to full identification") }) .collect::>>(); - Ok((id_tuples, offenders)) + Ok(id_tuples) } -benchmarks! 
{ - where_clause { - where +#[cfg(test)] +fn assert_all_slashes_applied(offender_count: usize) +where + T: Config, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto, + ::RuntimeEvent: TryInto>, +{ + // make sure that all slashes have been applied + // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + + // reporter account endowed + some funds rescinded from issuance. + assert_eq!( + System::::read_events_for_pallet::>().len(), + 2 * (offender_count + 1) + 3 + ); + // (n nominators + one validator) * slashed + Slash Reported + assert_eq!( + System::::read_events_for_pallet::>().len(), + 1 * (offender_count + 1) + 1 + ); + // offence + assert_eq!(System::::read_events_for_pallet::().len(), 1); + // reporter new account + assert_eq!(System::::read_events_for_pallet::>().len(), 1); +} + +#[benchmarks( + where ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto>, ::RuntimeEvent: TryInto, ::RuntimeEvent: TryInto>, - } - - report_offence_grandpa { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - +)] +mod benchmarks { + use super::*; + + #[benchmark] + pub fn report_offence_grandpa( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for grandpa equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -189,7 +218,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { @@ -199,28 +228,24 @@ benchmarks! { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } + #[cfg(test)] { - // make sure that all slashes have been applied - // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + reporter - // account endowed + some funds rescinded from issuance. - assert_eq!(System::::read_events_for_pallet::>().len(), 2 * (n + 1) as usize + 3); - // (n nominators + one validator) * slashed + Slash Reported - assert_eq!(System::::read_events_for_pallet::>().len(), 1 * (n + 1) as usize + 1); - // offence - assert_eq!(System::::read_events_for_pallet::().len(), 1); - // reporter new account - assert_eq!(System::::read_events_for_pallet::>().len(), 1); + assert_all_slashes_applied::(n as usize); } - } - report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + Ok(()) + } + #[benchmark] + fn report_offence_babe( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for babe equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -228,7 +253,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { @@ -238,23 +263,17 @@ benchmarks! 
From 8f1606e9f9bd6269a4c2631a161dcc73e969a302 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Tue, 3 Dec 2024 12:55:50 +0200
Subject: [PATCH 15/29] Rococo People <> Bulletin bridge fixes (#6708)

---
 .../chains/chain-polkadot-bulletin/src/lib.rs |  2 +-
 bridges/relays/utils/src/initialize.rs        |  7 ++--
 .../src/bridge_to_bulletin_config.rs          | 41 ++++---------------
 .../src/genesis_config_presets.rs             | 10 +++++
 .../bridge-hubs/bridge-hub-rococo/src/lib.rs  |  1 -
 .../bridge-hub-rococo/tests/tests.rs          | 12 +++---
 6 files changed, 28 insertions(+), 45 deletions(-)

diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs
index c5c18beb2cad..070bc7b0ba3d 100644
--- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs
+++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs
@@ -225,4 +225,4 @@ impl ChainWithMessages for PolkadotBulletin {
 }

 decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa);
-decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::HashedLaneId);
+decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::LegacyLaneId);

diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs
index 564ed1f0e5cc..deb9b9d059d5 100644
--- a/bridges/relays/utils/src/initialize.rs
+++ b/bridges/relays/utils/src/initialize.rs
@@ -52,9 +52,10 @@ pub fn initialize_logger(with_timestamp: bool) {
 		format,
 	);

-	let env_filter = EnvFilter::from_default_env()
-		.add_directive(Level::WARN.into())
-		.add_directive("bridge=info".parse().expect("static filter string is valid"));
+	let env_filter = EnvFilter::builder()
+		.with_default_directive(Level::WARN.into())
+		.with_default_directive("bridge=info".parse().expect("static filter string is valid"))
+		.from_env_lossy();

 	let builder = SubscriberBuilder::default().with_env_filter(env_filter);
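The `EnvFilter` change is worth a note: the old form read `RUST_LOG` first and then unconditionally added the `WARN` and `bridge=info` directives on top, while the builder form installs them only as defaults, presumably so that `RUST_LOG` can take precedence. A minimal sketch of the new pattern, assuming the `tracing-subscriber` crate:

    use tracing_subscriber::{filter::LevelFilter, EnvFilter};

    fn build_filter() -> EnvFilter {
        // The default directive applies only when RUST_LOG does not override
        // it; `from_env_lossy` skips (rather than panics on) bad directives.
        EnvFilter::builder()
            .with_default_directive(LevelFilter::WARN.into())
            .from_env_lossy()
    }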
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
index b284fa9e7af7..1e733503f43b 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
@@ -22,14 +22,13 @@ use crate::{
 	bridge_common_config::RelayersForPermissionlessLanesInstance, weights,
 	xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoBulletinGrandpa,
-	BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeHoldReason,
-	XcmOverRococoBulletin, XcmRouter,
+	BridgeRococoBulletinMessages, Runtime, RuntimeEvent, RuntimeHoldReason, XcmOverRococoBulletin,
+	XcmRouter,
 };
 use bp_messages::{
 	source_chain::FromBridgedChainMessagesDeliveryProof,
-	target_chain::FromBridgedChainMessagesProof, HashedLaneId,
+	target_chain::FromBridgedChainMessagesProof, LegacyLaneId,
 };
-use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge;

 use frame_support::{
 	parameter_types,
@@ -46,6 +45,7 @@ use testnet_parachains_constants::rococo::currency::UNITS as ROC;
 use xcm::{
 	latest::prelude::*,
 	prelude::{InteriorLocation, NetworkId},
+	AlwaysV5,
 };
 use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia};

@@ -120,7 +120,7 @@ impl pallet_bridge_messages::Config for Runt
 	type OutboundPayload = XcmAsPlainPayload;
 	type InboundPayload = XcmAsPlainPayload;
-	type LaneId = HashedLaneId;
+	type LaneId = LegacyLaneId;

 	type DeliveryPayments = ();
 	type DeliveryConfirmationPayments = ();
@@ -139,8 +139,7 @@ impl pallet_xcm_bridge_hub::Config for Runtime
 	type BridgeMessagesPalletInstance = WithRococoBulletinMessagesInstance;

 	type MessageExportPrice = ();
-	type DestinationVersion =
-		XcmVersionOfDestAndRemoteBridge<Runtime, XcmOverRococoBulletinInstance>;
+	type DestinationVersion = AlwaysV5;

 	type ForceOrigin = EnsureRoot<AccountId>;
 	// We don't want to allow creating bridges for this instance.
@@ -253,7 +252,7 @@ where
 	let universal_source =
 		[GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(sibling_para_id)].into();
 	let universal_destination =
-		[GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get()), Parachain(2075)].into();
+		[GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into();
 	let bridge_id = BridgeId::new(&universal_source, &universal_destination);

 	// insert only bridge metadata, because the benchmarks create lanes
@@ -279,29 +278,3 @@ where

 	universal_source
 }
-
-/// Contains the migration for the PeopleRococo<>RococoBulletin bridge.
-pub mod migration {
-	use super::*;
-	use frame_support::traits::ConstBool;
-
-	parameter_types! {
-		pub BulletinRococoLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into();
-		pub RococoPeopleToRococoBulletinMessagesLane: HashedLaneId = pallet_xcm_bridge_hub::Pallet::< Runtime, XcmOverPolkadotBulletinInstance >::bridge_locations(
-			PeopleRococoLocation::get(),
-			BulletinRococoLocation::get()
-		)
-		.unwrap()
-		.calculate_lane_id(xcm::latest::VERSION).expect("Valid locations");
-	}
-
-	/// Ensure that the existing lanes for the People<>Bulletin bridge are correctly configured.
-	pub type StaticToDynamicLanes = pallet_xcm_bridge_hub::migration::OpenBridgeForLane<
-		Runtime,
-		XcmOverPolkadotBulletinInstance,
-		RococoPeopleToRococoBulletinMessagesLane,
-		ConstBool<true>,
-		PeopleRococoLocation,
-		BulletinRococoLocation,
-	>;
-}
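The switch from `HashedLaneId` to `LegacyLaneId` is what lets the genesis preset and the tests further down name one fixed, well-known lane instead of deriving it. A small sketch of the difference, using only the two constructors that appear in this diff:

    use bp_messages::{HashedLaneId, LaneIdType, LegacyLaneId};

    // A legacy lane id is four explicit bytes, so a well-known lane can be
    // spelled out statically (here the all-zero lane used below).
    let legacy = LegacyLaneId([0, 0, 0, 0]);

    // A hashed lane id is instead derived from the two bridge endpoints,
    // which is why it is fallible.
    let hashed = HashedLaneId::try_new(1, 2).expect("valid endpoints");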
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs
index 98e2450ee832..55fd499c2f54 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs
@@ -61,10 +61,20 @@ fn bridge_hub_rococo_genesis(
 			.collect(),
 		},
 		polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) },
+		bridge_polkadot_bulletin_grandpa: BridgePolkadotBulletinGrandpaConfig {
+			owner: bridges_pallet_owner.clone(),
+		},
 		bridge_westend_grandpa: BridgeWestendGrandpaConfig { owner: bridges_pallet_owner.clone() },
 		bridge_westend_messages: BridgeWestendMessagesConfig {
 			owner: bridges_pallet_owner.clone(),
 		},
+		xcm_over_polkadot_bulletin: XcmOverPolkadotBulletinConfig {
+			opened_bridges: vec![(
+				Location::new(1, [Parachain(1004)]),
+				Junctions::from([GlobalConsensus(NetworkId::PolkadotBulletin).into()]),
+				Some(bp_messages::LegacyLaneId([0, 0, 0, 0])),
+			)],
+		},
 		xcm_over_bridge_hub_westend: XcmOverBridgeHubWestendConfig { opened_bridges },
 		ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id },
 	})

diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 598afeddb984..d87ff9b43fef 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -169,7 +169,6 @@ pub type Migrations = (
 		bridge_to_westend_config::WithBridgeHubWestendMessagesInstance,
 	>,
 	bridge_to_westend_config::migration::StaticToDynamicLanes,
-	bridge_to_bulletin_config::migration::StaticToDynamicLanes,
 	frame_support::migrations::RemoveStorage<
 		BridgeWestendMessagesPalletName,
 		OutboundLanesCongestedSignalsKey,

diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
index 29f9615bff6a..44e69c31a560 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
@@ -501,10 +501,10 @@ mod bridge_hub_westend_tests {
 mod bridge_hub_bulletin_tests {
 	use super::*;
-	use bp_messages::{HashedLaneId, LaneIdType};
+	use bp_messages::LegacyLaneId;
 	use bridge_common_config::BridgeGrandpaRococoBulletinInstance;
 	use bridge_hub_rococo_runtime::{
-		bridge_common_config::RelayersForPermissionlessLanesInstance,
+		bridge_common_config::RelayersForLegacyLaneIdsMessagesInstance,
 		xcm_config::LocationToAccountId,
 	};
 	use bridge_hub_test_utils::test_cases::from_grandpa_chain;
@@ -528,7 +528,7 @@ mod bridge_hub_bulletin_tests {
 		AllPalletsWithoutSystem,
 		BridgeGrandpaRococoBulletinInstance,
 		WithRococoBulletinMessagesInstance,
-		RelayersForPermissionlessLanesInstance,
+		RelayersForLegacyLaneIdsMessagesInstance,
 	>;

 	#[test]
@@ -599,7 +599,7 @@ mod bridge_hub_bulletin_tests {
 					bridge_hub_test_utils::open_bridge_with_storage::<
 						Runtime,
 						XcmOverPolkadotBulletinInstance
-					>(locations, HashedLaneId::try_new(1, 2).unwrap())
+					>(locations, LegacyLaneId([0, 0, 0, 0]))
 				}
 			).1
 		},
@@ -663,7 +663,7 @@ mod bridge_hub_bulletin_tests {
 				bridge_hub_test_utils::open_bridge_with_storage::<
 					Runtime,
 					XcmOverPolkadotBulletinInstance,
-				>(locations, HashedLaneId::try_new(1, 2).unwrap())
+				>(locations, LegacyLaneId([0, 0, 0, 0]))
 			},
 		)
 		.1
@@ -697,7 +697,7 @@ mod bridge_hub_bulletin_tests {
 				bridge_hub_test_utils::open_bridge_with_storage::<
 					Runtime,
 					XcmOverPolkadotBulletinInstance,
-				>(locations, HashedLaneId::try_new(1, 2).unwrap())
+				>(locations, LegacyLaneId([0, 0, 0, 0]))
 			},
 		)
 		.1

From 592bb3205be7569cf2d705b31a272340038bbed7 Mon Sep 17 00:00:00 2001
From: Egor_P
Date: Tue, 3 Dec 2024 13:06:43 +0100
Subject: [PATCH 16/29] [Release/CICD] Re-worked Create Release Draft flow (#6734)

This PR contains the following changes to the release pipelines:
- re-built the Create Release Draft workflow
- binary builds are moved completely to the `Release - Build node release candidate` flow
- added upload of all the release artefacts to S3
- adjusted the `Release - Publish Docker Image` workflow so that it now matches the new release flow.
---
 .github/scripts/common/lib.sh                 |  45 +++-
 .github/scripts/release/release_lib.sh        |  22 ++
 ...le.yml => release-10_branchoff-stable.yml} |   0
 ...ation.yml => release-11_rc-automation.yml} |   0
 ...e-build-rc.yml => release-20_build-rc.yml} |  96 +++++++-
 .../release-30_publish_release_draft.yml      | 206 +++++++++++-------
 .../workflows/release-50_publish-docker.yml   |  97 +++------
 .../workflows/release-reusable-rc-buid.yml    |  53 ++++-
 .github/workflows/release-srtool.yml          |  18 +-
 9 files changed, 373 insertions(+), 164 deletions(-)
 rename .github/workflows/{release-branchoff-stable.yml => release-10_branchoff-stable.yml} (100%)
 rename .github/workflows/{release-10_rc-automation.yml => release-11_rc-automation.yml} (100%)
 rename .github/workflows/{release-build-rc.yml => release-20_build-rc.yml} (62%)

diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh
index 6b8f70a26d7e..41dc0ba06dd2 100755
--- a/.github/scripts/common/lib.sh
+++ b/.github/scripts/common/lib.sh
@@ -270,20 +270,19 @@ fetch_debian_package_from_s3() {
 }

 # Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set:
-# - RELEASE_ID
-# - GITHUB_TOKEN
-# - REPO in the form paritytech/polkadot
+# inputs: binary (polkadot), target (aarch64-apple-darwin)
 fetch_release_artifacts_from_s3() {
   BINARY=$1
-  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"}
+  TARGET=$2
+  OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"}
   echo "OUTPUT_DIR : $OUTPUT_DIR"

   URL_BASE=$(get_s3_url_base $BINARY)
   echo "URL_BASE=$URL_BASE"

-  URL_BINARY=$URL_BASE/$VERSION/$BINARY
-  URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256
-  URL_ASC=$URL_BASE/$VERSION/$BINARY.asc
+  URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY
+  URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256
+  URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc

   # Fetch artifacts
   mkdir -p "$OUTPUT_DIR"
@@ -306,15 +305,26 @@ fetch_release_artifacts_from_s3() {
 function get_s3_url_base() {
   name=$1
   case $name in
-    polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner)
+    polkadot | polkadot-execute-worker | polkadot-prepare-worker )
       printf "https://releases.parity.io/polkadot"
       ;;

-    polkadot-parachain)
-      printf "https://releases.parity.io/cumulus"
+    polkadot-parachain)
+      printf "https://releases.parity.io/polkadot-parachain"
+      ;;
+
+    polkadot-omni-node)
+      printf "https://releases.parity.io/polkadot-omni-node"
+      ;;
+
+    chain-spec-builder)
+      printf "https://releases.parity.io/chain-spec-builder"
       ;;

-    *)
+    frame-omni-bencher)
+      printf "https://releases.parity.io/frame-omni-bencher"
+      ;;
+    *)
       printf "UNSUPPORTED BINARY $name"
       exit 1
       ;;
@@ -497,3 +507,16 @@ validate_stable_tag() {
     exit 1
   fi
 }
+
+# Prepare docker stable tag from the polkadot stable tag
+# input: tag (polkadot-stableYYMM(-X) or polkadot-stableYYMM(-X)-rcX)
+# output: stableYYMM(-X) or stableYYMM(-X)-rcX
+prepare_docker_stable_tag() {
+  tag="$1"
+  if [[ "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? ]]; then
+    echo "${BASH_REMATCH[0]}"
+  else
+    echo "Tag is invalid: $tag"
+    exit 1
+  fi
+}

diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh
index 8b9254ec3f29..43227180cb7c 100644
--- a/.github/scripts/release/release_lib.sh
+++ b/.github/scripts/release/release_lib.sh
@@ -139,3 +139,25 @@ upload_s3_release() {
   aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize
   echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}"
 }
+
+# Upload runtimes artifacts to s3 release bucket
+#
+# input: version (stable release tag, e.g. polkadot-stable2412 or polkadot-stable2412-rc1)
+# output: none
+upload_s3_runtimes_release_artifacts() {
+  alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws'
+
+  version=$1
+
+  echo "Working on version: $version "
+
+  echo "Current content, should be empty on new uploads:"
+  aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize || true
+  echo "Content to be uploaded:"
+  artifacts="artifacts/runtimes/"
+  ls "$artifacts"
+  aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/runtimes/${version}/"
+  echo "Uploaded files:"
+  aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize
+  echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}"
+}
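Both new helpers lean on the same tag grammar, `polkadot-stableYYMM(-X)` with an optional `-rcX` suffix, from which `prepare_docker_stable_tag` cuts out the `stableYYMM…` part. A quick sketch of that extraction in Rust (the `regex` crate is an assumption for illustration; the release scripts themselves stay in bash):

    use regex::Regex;

    fn prepare_docker_stable_tag(tag: &str) -> Option<String> {
        // Mirrors `stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)?` from lib.sh.
        let re = Regex::new(r"stable\d{4}(-\d+)?(-rc\d+)?").expect("static regex");
        re.find(tag).map(|m| m.as_str().to_owned())
    }

    // prepare_docker_stable_tag("polkadot-stable2412-rc1") == Some("stable2412-rc1")
    // prepare_docker_stable_tag("v1.16.0") == None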
diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-10_branchoff-stable.yml
similarity index 100%
rename from .github/workflows/release-branchoff-stable.yml
rename to .github/workflows/release-10_branchoff-stable.yml
diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-11_rc-automation.yml
similarity index 100%
rename from .github/workflows/release-10_rc-automation.yml
rename to .github/workflows/release-11_rc-automation.yml
diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-20_build-rc.yml
similarity index 62%
rename from .github/workflows/release-build-rc.yml
rename to .github/workflows/release-20_build-rc.yml
index a43c2b282a8d..d4c7055c37c5 100644
--- a/.github/workflows/release-build-rc.yml
+++ b/.github/workflows/release-20_build-rc.yml
@@ -11,10 +11,12 @@ on:
           - polkadot
           - polkadot-parachain
           - polkadot-omni-node
+          - frame-omni-bencher
+          - chain-spec-builder
           - all

       release_tag:
-        description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM
+        description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X)
         type: string

 jobs:
@@ -106,6 +108,50 @@ jobs:
       attestations: write
       contents: read

+  build-frame-omni-bencher-binary:
+    needs: [validate-inputs]
+    if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }}
+    uses: "./.github/workflows/release-reusable-rc-buid.yml"
+    with:
+      binary: '["frame-omni-bencher"]'
+      package: "frame-omni-bencher"
+      release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
+      target: x86_64-unknown-linux-gnu
+    secrets:
+      PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
+      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+    permissions:
+      id-token: write
+      attestations: write
+      contents: read
+
+  build-chain-spec-builder-binary:
+    needs: [validate-inputs]
+    if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }}
+    uses: "./.github/workflows/release-reusable-rc-buid.yml"
+    with:
+      binary: '["chain-spec-builder"]'
+      package: staging-chain-spec-builder
+      release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
+      target: x86_64-unknown-linux-gnu
+    secrets:
+      PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
+      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+    permissions:
+      id-token: write
+      attestations: write
+      contents: read
+
   build-polkadot-macos-binary:
     needs: [validate-inputs]
     if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }}
@@ -134,7 +180,7 @@ jobs:
     uses: "./.github/workflows/release-reusable-rc-buid.yml"
     with:
       binary: '["polkadot-parachain"]'
-      package: "polkadot-parachain-bin"
+      package: polkadot-parachain-bin
       release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
       target: aarch64-apple-darwin
     secrets:
@@ -156,7 +202,51 @@ jobs:
     uses: "./.github/workflows/release-reusable-rc-buid.yml"
     with:
       binary: '["polkadot-omni-node"]'
-      package: "polkadot-omni-node"
+      package: polkadot-omni-node
       release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
       target: aarch64-apple-darwin
     secrets:
       PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
       PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
       AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
       AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
       AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+    permissions:
+      id-token: write
+      attestations: write
+      contents: read
+
+  build-frame-omni-bencher-macos-binary:
+    needs: [validate-inputs]
+    if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }}
+    uses: "./.github/workflows/release-reusable-rc-buid.yml"
+    with:
+      binary: '["frame-omni-bencher"]'
+      package: frame-omni-bencher
+      release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
+      target: aarch64-apple-darwin
+    secrets:
+      PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
+      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+    permissions:
+      id-token: write
+      attestations: write
+      contents: read
+
+  build-chain-spec-builder-macos-binary:
+    needs: [validate-inputs]
+    if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }}
+    uses: "./.github/workflows/release-reusable-rc-buid.yml"
+    with:
+      binary: '["chain-spec-builder"]'
+      package: staging-chain-spec-builder
       release_tag: ${{ needs.validate-inputs.outputs.release_tag }}
       target: aarch64-apple-darwin
     secrets:

diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml
index 4364b4f80457..78ceea91f100 100644
--- a/.github/workflows/release-30_publish_release_draft.yml
+++ b/.github/workflows/release-30_publish_release_draft.yml
@@ -1,19 +1,46 @@
 name: Release - Publish draft

-on:
-  push:
-    tags:
-      # Catches v1.2.3 and v1.2.3-rc1
-      - v[0-9]+.[0-9]+.[0-9]+*
-      # - polkadot-stable[0-9]+* Activate when the release process from release org is setteled
+# This workflow runs in paritytech-release and creates full release draft with:
+# - release notes
+# - info about the runtimes
+# - attached artifacts:
+#   - runtimes
+#   - binaries
+#   - signatures

+on:
   workflow_dispatch:
     inputs:
-      version:
-        description: Current release/rc version
+      release_tag:
+        description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X)
+        required: true
+        type: string

 jobs:
+  check-synchronization:
+    uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main
+
+  validate-inputs:
+    needs: [ check-synchronization ]
+    if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true'
+    runs-on: ubuntu-latest
+    outputs:
+      release_tag: ${{ steps.validate_inputs.outputs.release_tag }}
+
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Validate inputs
+        id: validate_inputs
+        run: |
+          . ./.github/scripts/common/lib.sh
+
+          RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }})
+          echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT
+
   get-rust-versions:
+    needs: [ validate-inputs ]
     runs-on: ubuntu-latest
     outputs:
       rustc-stable: ${{ steps.get-rust-versions.outputs.stable }}
@@ -24,47 +51,28 @@ jobs:
       echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT

   build-runtimes:
+    needs: [ validate-inputs ]
     uses: "./.github/workflows/release-srtool.yml"
     with:
       excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template polkadot-sdk-docs-first"
       build_opts: "--features on-chain-release-build"
-
-  build-binaries:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        # Tuples of [package, binary-name]
-        binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ]
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0
-
-      - name: Install protobuf-compiler
-        run: |
-          sudo apt update
-          sudo apt install -y protobuf-compiler
-
-      - name: Build ${{ matrix.binary[1] }} binary
-        run: |
-          cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }}
-          target/production/${{ matrix.binary[1] }} --version
-
-      - name: Upload ${{ matrix.binary[1] }} binary
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
-        with:
-          name: ${{ matrix.binary[1] }}
-          path: target/production/${{ matrix.binary[1] }}
-
+      profile: production
+    permissions:
+      id-token: write
+      attestations: write
+      contents: read

   publish-release-draft:
     runs-on: ubuntu-latest
-    needs: [ get-rust-versions, build-runtimes ]
+    environment: release
+    needs: [ validate-inputs, get-rust-versions, build-runtimes ]
     outputs:
       release_url: ${{ steps.create-release.outputs.html_url }}
       asset_upload_url: ${{ steps.create-release.outputs.upload_url }}
+
     steps:
       - name: Checkout
-        uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Download artifacts
         uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
@@ -87,20 +95,21 @@ jobs:
           GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json
           PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json
           WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json
+          RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }}
         shell: bash
         run: |
           . ./.github/scripts/common/lib.sh

           export REF1=$(get_latest_release_tag)
-          if [[ -z "${{ inputs.version }}" ]]; then
+          if [[ -z "$RELEASE_TAG" ]]; then
             export REF2="${{ github.ref_name }}"
             echo "REF2: ${REF2}"
           else
-            export REF2="${{ inputs.version }}"
+            export REF2="$RELEASE_TAG"
             echo "REF2: ${REF2}"
           fi
           echo "REL_TAG=$REF2" >> $GITHUB_ENV
-          export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]+).*$/\1/')
+          export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]{4}(-[0-9]+)?).*$/\1/')

           ./scripts/release/build-changelogs.sh

@@ -112,19 +121,29 @@ jobs:
             scripts/release/context.json
             **/*-srtool-digest.json

+      - name: Generate content write token for the release automation
+        id: generate_write_token
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }}
+          private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }}
+          owner: paritytech
+          repositories: polkadot-sdk
+
       - name: Create draft release
         id: create-release
-        uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
         env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ env.REL_TAG }}
-          release_name: Polkadot ${{ env.REL_TAG }}
-          body_path: ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md
-          draft: true
+          GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }}
+        run: |
+          gh release create ${{ env.REL_TAG }} \
+            --repo paritytech/polkadot-sdk \
+            --draft \
+            --title "Polkadot ${{ env.REL_TAG }}" \
+            --notes-file ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md

   publish-runtimes:
-    needs: [ build-runtimes, publish-release-draft ]
+    needs: [ validate-inputs, build-runtimes, publish-release-draft ]
+    environment: release
     continue-on-error: true
     runs-on: ubuntu-latest
     strategy:
@@ -132,7 +151,7 @@ jobs:

     steps:
       - name: Checkout sources
-        uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Download artifacts
         uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
@@ -144,44 +163,83 @@ jobs:
          >>$GITHUB_ENV echo ASSET=$(find ${{ matrix.chain }}-runtime -name '*.compact.compressed.wasm')
          >>$GITHUB_ENV echo SPEC=$(<${JSON} jq -r .runtimes.compact.subwasm.core_version.specVersion)

+      - name: Generate content write token for the release automation
+        id: generate_write_token
+        uses: actions/create-github-app-token@v1
+        with:
+          app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }}
+          private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }}
+          owner: paritytech
+          repositories: polkadot-sdk
+
       - name: Upload compressed ${{ matrix.chain }} v${{ env.SPEC }} wasm
-        if: ${{ matrix.chain != 'rococo-parachain' }}
-        uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
         env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }}
-          asset_path: ${{ env.ASSET }}
-          asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm
-          asset_content_type: application/wasm
+          GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }}
+        run: |
+          gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \
+            --repo paritytech/polkadot-sdk \
+            '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm'

-  publish-binaries:
-    needs: [ publish-release-draft, build-binaries ]
+  publish-release-artifacts:
+    needs: [ validate-inputs, publish-release-draft ]
+    environment: release
     continue-on-error: true
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        binary: [frame-omni-bencher, chain-spec-builder]
+        binary: [ polkadot, polkadot-execute-worker, polkadot-prepare-worker, polkadot-parachain, polkadot-omni-node, frame-omni-bencher, chain-spec-builder ]
+        target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ]

     steps:
-      - name: Download artifacts
-        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+      - name: Checkout sources
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Fetch binaries from s3 based on version
+        run: |
+          . ./.github/scripts/common/lib.sh
+
+          VERSION="${{ needs.validate-inputs.outputs.release_tag }}"
+          fetch_release_artifacts_from_s3 ${{ matrix.binary }} ${{ matrix.target }}
+
+      - name: Rename aarch64-apple-darwin binaries
+        if: ${{ matrix.target == 'aarch64-apple-darwin' }}
+        working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }}
+        run: |
+          mv ${{ matrix.binary }} ${{ matrix.binary }}-aarch64-apple-darwin
+          mv ${{ matrix.binary }}.asc ${{ matrix.binary }}-aarch64-apple-darwin.asc
+          mv ${{ matrix.binary }}.sha256 ${{ matrix.binary }}-aarch64-apple-darwin.sha256
+
+      - name: Generate content write token for the release automation
+        id: generate_write_token
+        uses: actions/create-github-app-token@v1
         with:
-          name: ${{ matrix.binary }}
+          app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }}
+          private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }}
+          owner: paritytech
+          repositories: polkadot-sdk

-      - name: Upload ${{ matrix.binary }} binary
-        uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
+      - name: Upload ${{ matrix.binary }} binary to release draft
         env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }}
-          asset_path: ${{ github.workspace}}/${{ matrix.binary }}
-          asset_name: ${{ matrix.binary }}
-          asset_content_type: application/octet-stream
+          GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }}
+        working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }}
+        run: |
+          if [[ ${{ matrix.target }} == "aarch64-apple-darwin" ]]; then
+            gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \
+              --repo paritytech/polkadot-sdk \
+              ${{ matrix.binary }}-aarch64-apple-darwin \
+              ${{ matrix.binary }}-aarch64-apple-darwin.asc \
+              ${{ matrix.binary }}-aarch64-apple-darwin.sha256
+          else
+            gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \
+              --repo paritytech/polkadot-sdk \
+              ${{ matrix.binary }} \
+              ${{ matrix.binary }}.asc \
+              ${{ matrix.binary }}.sha256
+          fi

   post_to_matrix:
     runs-on: ubuntu-latest
-    needs: publish-release-draft
+    needs: [ validate-inputs, publish-release-draft ]
     environment: release
     strategy:
       matrix:
@@ -197,5 +255,5 @@ jobs:
           access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
           server: m.parity.io
           message: |
-            **New version of polkadot tagged**: ${{ github.ref_name }}
-            Draft release created: ${{ needs.publish-release-draft.outputs.release_url }}
+            **New version of polkadot tagged**: ${{ needs.validate-inputs.outputs.release_tag }}
+            The release draft has been created in the [polkadot-sdk repo](https://github.com/paritytech/polkadot-sdk/releases)
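The draft itself is now created with the GitHub CLI under a short-lived app token instead of the archived `actions/create-release` action. For illustration only, here is the same invocation shape driven from Rust; the workflow simply runs `gh` in a shell step, and the token in `GITHUB_TOKEN` must have write access to releases:

    use std::process::Command;

    // Sketch: build the `gh release create --draft` call used by the workflow.
    fn create_draft(tag: &str, notes_file: &str) -> std::io::Result<std::process::ExitStatus> {
        Command::new("gh")
            .args([
                "release", "create", tag,
                "--repo", "paritytech/polkadot-sdk",
                "--draft",
                "--title", &format!("Polkadot {tag}"),
                "--notes-file", notes_file,
            ])
            .status()
    }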
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml
index 627e53bacd88..5c3c3a6e854d 100644
--- a/.github/workflows/release-50_publish-docker.yml
+++ b/.github/workflows/release-50_publish-docker.yml
@@ -4,10 +4,6 @@ name: Release - Publish Docker Image
 # It builds and published releases and rc candidates.

 on:
-  #TODO: activate automated run later
-  # release:
-  #   types:
-  #     - published
   workflow_dispatch:
     inputs:
       image_type:
@@ -30,16 +26,6 @@ on:
           - polkadot-parachain
          - chain-spec-builder

-      release_id:
-        description: |
-          Release ID.
-          You can find it using the command:
-          curl -s \
-            -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \
-            jq '.[] | { name: .name, id: .id }'
-        required: true
-        type: number
-
       registry:
         description: Container registry
         required: true
@@ -55,7 +41,7 @@ on:
         default: parity

       version:
-        description: version to build/release
+        description: Version of the polkadot node release in format v1.16.0 or v1.16.0-rc1
         default: v0.9.18
         required: true

@@ -78,11 +64,15 @@ env:
   IMAGE_TYPE: ${{ inputs.image_type }}

 jobs:
+  check-synchronization:
+    uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main
+
   validate-inputs:
+    needs: [check-synchronization]
+    if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true'
     runs-on: ubuntu-latest
     outputs:
       version: ${{ steps.validate_inputs.outputs.VERSION }}
-      release_id: ${{ steps.validate_inputs.outputs.RELEASE_ID }}
       stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }}

     steps:
@@ -97,11 +87,6 @@ jobs:
           VERSION=$(filter_version_from_input "${{ inputs.version }}")
           echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT

-          RELEASE_ID=$(check_release_id "${{ inputs.release_id }}")
-          echo "RELEASE_ID=${RELEASE_ID}" >> $GITHUB_OUTPUT
-
-          echo "Release ID: $RELEASE_ID"
-
           STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }})
           echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT

@@ -114,50 +99,26 @@ jobs:
       - name: Checkout sources
         uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0

-      #TODO: this step will be needed when automated triggering will work
-      #this step runs only if the workflow is triggered automatically when new release is published
-      # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }}
-      # run: |
-      #   mkdir -p release-artifacts && cd release-artifacts
-
-      #   for f in $BINARY $BINARY.asc $BINARY.sha256; do
-      #     URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f"
-      #     echo " - Fetching $f from $URL"
-      #     wget "$URL" -O "$f"
-      #   done
-      #   chmod a+x $BINARY
-      #   ls -al
-
       - name: Fetch rc artifacts or release artifacts from s3 based on version
-        #this step runs only if the workflow is triggered manually
-        if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}}
+        # if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}}
         run: |
           . ./.github/scripts/common/lib.sh

-          VERSION="${{ needs.validate-inputs.outputs.VERSION }}"
+          VERSION="${{ needs.validate-inputs.outputs.stable_tag }}"
           if [[ ${{ inputs.binary }} == 'polkadot' ]]; then
             bins=(polkadot polkadot-prepare-worker polkadot-execute-worker)
             for bin in "${bins[@]}"; do
-              fetch_release_artifacts_from_s3 $bin
+              fetch_release_artifacts_from_s3 $bin x86_64-unknown-linux-gnu
             done
           else
-            fetch_release_artifacts_from_s3 $BINARY
+            fetch_release_artifacts_from_s3 $BINARY x86_64-unknown-linux-gnu
           fi

-      - name: Fetch polkadot-omni-node/chain-spec-builder rc artifacts or release artifacts based on release id
-        #this step runs only if the workflow is triggered manually and only for chain-spec-builder
-        if: ${{ env.EVENT_NAME == 'workflow_dispatch' && (inputs.binary == 'polkadot-omni-node' || inputs.binary == 'chain-spec-builder') }}
-        run: |
-          . ./.github/scripts/common/lib.sh
-
-          RELEASE_ID="${{ needs.validate-inputs.outputs.RELEASE_ID }}"
-          fetch_release_artifacts
-
       - name: Upload artifacts
         uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
         with:
           name: release-artifacts
-          path: release-artifacts/${{ env.BINARY }}/**/*
+          path: release-artifacts/x86_64-unknown-linux-gnu/${{ env.BINARY }}/**/*

   build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build
     if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }}
@@ -173,7 +134,7 @@ jobs:
         uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8

       - name: Check sha256 ${{ env.BINARY }}
-        if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }}
+        # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }}
         working-directory: release-artifacts
         run: |
           . ../.github/scripts/common/lib.sh
@@ -182,7 +143,7 @@ jobs:
           check_sha256 $BINARY && echo "OK" || echo "ERR"

       - name: Check GPG ${{ env.BINARY }}
-        if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }}
+        # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }}
         working-directory: release-artifacts
         run: |
           . ../.github/scripts/common/lib.sh
           check_gpg $BINARY

       - name: Fetch rc commit and tag
+        working-directory: release-artifacts
         if: ${{ env.IMAGE_TYPE == 'rc' }}
         id: fetch_rc_refs
+        shell: bash
         run: |
-          . ./.github/scripts/common/lib.sh
-
-          echo "release=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT
+          . ../.github/scripts/common/lib.sh

           commit=$(git rev-parse --short HEAD) && \
           echo "commit=${commit}" >> $GITHUB_OUTPUT
-
-          echo "tag=${{ needs.validate-inputs.outputs.version }}" >> $GITHUB_OUTPUT
+          echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT
+          echo "tag=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT

       - name: Fetch release tags
         working-directory: release-artifacts
         if: ${{ env.IMAGE_TYPE == 'release'}}
         id: fetch_release_refs
+        shell: bash
         run: |
-          chmod a+rx $BINARY
-
-          if [[ $BINARY != 'chain-spec-builder' ]]; then
-            VERSION=$(./$BINARY --version | awk '{ print $2 }' )
-            release=$( echo $VERSION | cut -f1 -d- )
-          else
-            release=$(echo ${{ needs.validate-inputs.outputs.VERSION }} | sed 's/^v//')
-          fi
+          . ../.github/scripts/common/lib.sh

           echo "tag=latest" >> $GITHUB_OUTPUT
-          echo "release=${release}" >> $GITHUB_OUTPUT
-          echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT
+          echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT
+          echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT

       - name: Build Injected Container image for polkadot rc
         if: ${{ env.BINARY == 'polkadot' }}
@@ -342,8 +297,10 @@ jobs:
       - name: Fetch values
         id: fetch-data
         run: |
+          . ./.github/scripts/common/lib.sh
           date=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
           echo "date=$date" >> $GITHUB_OUTPUT
+          echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT

       - name: Build and push
         id: docker_build
@@ -354,9 +311,9 @@ jobs:
           # TODO: The owner should be used below but buildx does not resolve the VARs
           # TODO: It would be good to get rid of this GHA that we don't really need.
           tags: |
-            parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }}
-            parity/polkadot:latest
-            parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }}
+            egorpop/polkadot:${{ steps.fetch-data.outputs.stable }}
+            egorpop/polkadot:latest
+            egorpop/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }}
           build-args: |
             VCS_REF=${{ github.ref }}
             POLKADOT_VERSION=${{ needs.fetch-latest-debian-package-version.outputs.polkadot_apt_version }}
diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml
index f5240878cba2..dc1b4553eb9b 100644
--- a/.github/workflows/release-reusable-rc-buid.yml
+++ b/.github/workflows/release-reusable-rc-buid.yml
@@ -302,7 +302,6 @@ jobs:
       AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
       AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}

-
   upload-polkadot-parachain-artifacts-to-s3:
     if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }}
     needs: [build-rc]
@@ -329,6 +328,32 @@ jobs:
       AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
       AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}

+  upload-frame-omni-bencher-artifacts-to-s3:
+    if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'x86_64-unknown-linux-gnu' }}
+    needs: [build-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: ${{ inputs.package }}
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-chain-spec-builder-artifacts-to-s3:
+    if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'x86_64-unknown-linux-gnu' }}
+    needs: [build-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: chain-spec-builder
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
   upload-polkadot-macos-artifacts-to-s3:
     if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }}
     # TODO: add and use a `build-polkadot-homebrew-package` which packs all `polkadot` binaries:
@@ -395,3 +420,29 @@ jobs:
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
       AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
       AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-frame-omni-bencher-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'aarch64-apple-darwin' }}
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: ${{ inputs.package }}
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-chain-spec-builder-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'aarch64-apple-darwin' }}
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: chain-spec-builder
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}

diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml
index 9a29b46d2fc3..fc10496d481b 100644
--- a/.github/workflows/release-srtool.yml
+++ b/.github/workflows/release-srtool.yml
@@ -1,7 +1,7 @@ name: Srtool build

 env:
-  SUBWASM_VERSION: 0.20.0
+  SUBWASM_VERSION: 0.21.0
   TOML_CLI_VERSION: 0.2.4

 on:
@@ -11,14 +11,16 @@ on:
         type: string
       build_opts:
         type: string
+      profile:
+        type: string
     outputs:
       published_runtimes:
         value: ${{ jobs.find-runtimes.outputs.runtime }}

-  schedule:
-    - cron: "00 02 * * 1" # 2AM weekly on monday
-
-  workflow_dispatch:
+permissions:
+  id-token: write
+  attestations: write
+  contents: read

 jobs:
   find-runtimes:
@@ -75,6 +77,7 @@ jobs:
         with:
           chain: ${{ matrix.chain }}
           runtime_dir: ${{ matrix.runtime_dir }}
+          profile: ${{ inputs.profile }}

       - name: Summary
         run: |
@@ -83,6 +86,11 @@ jobs:
           echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}"
           echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}"

+      - name: Generate artifact attestation
+        uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3
+        with:
+          subject-path: ${{ steps.srtool_build.outputs.wasm }}
+
       # We now get extra information thanks to subwasm
       - name: Install subwasm
         run: |

From 76a292b23bf6f35156fd3dd832e9c4ec31b24b2c Mon Sep 17 00:00:00 2001
From: Lulu
Date: Tue, 3 Dec 2024 13:22:45 +0100
Subject: [PATCH 17/29] Update parity-publish (#6549)

---
 .github/workflows/check-semver.yml                  |   4 +-
 .github/workflows/publish-check-crates.yml          |   2 +-
 .github/workflows/publish-claim-crates.yml          |   2 +-
 .../snowbridge/runtime/test-common/Cargo.toml       |   2 +
 cumulus/client/cli/Cargo.toml                       |   2 +
 cumulus/client/collator/Cargo.toml                  |   2 +
 cumulus/client/consensus/aura/Cargo.toml            |   2 +
 cumulus/client/consensus/common/Cargo.toml          |   2 +
 cumulus/client/consensus/proposer/Cargo.toml        |   2 +
 .../client/consensus/relay-chain/Cargo.toml         |   2 +
 cumulus/client/network/Cargo.toml                   |   2 +
 cumulus/client/parachain-inherent/Cargo.toml        |   2 +
 cumulus/client/pov-recovery/Cargo.toml              |   2 +
 .../Cargo.toml                                      |   2 +
 .../client/relay-chain-interface/Cargo.toml         |   2 +
 .../relay-chain-minimal-node/Cargo.toml             |   2 +
 .../relay-chain-rpc-interface/Cargo.toml            |   2 +
 cumulus/client/service/Cargo.toml                   |   2 +
 cumulus/pallets/aura-ext/Cargo.toml                 |   2 +
 cumulus/pallets/parachain-system/Cargo.toml         |   2 +
 .../parachain-system/proc-macro/Cargo.toml          |   2 +
 cumulus/pallets/solo-to-para/Cargo.toml             |   2 +
 cumulus/pallets/xcm/Cargo.toml                      |   2 +
 cumulus/pallets/xcmp-queue/Cargo.toml               |   2 +
 cumulus/parachains/common/Cargo.toml                |   2 +
 .../emulated/common/Cargo.toml                      |   2 +
 .../pallets/collective-content/Cargo.toml           |   2 +
 .../pallets/parachain-info/Cargo.toml               |   2 +
 cumulus/parachains/pallets/ping/Cargo.toml          |   2 +
 .../assets/asset-hub-rococo/Cargo.toml              |   2 +
 .../assets/asset-hub-westend/Cargo.toml             |   2 +
 .../runtimes/assets/common/Cargo.toml               |   2 +
 .../runtimes/assets/test-utils/Cargo.toml           |   2 +
 .../bridge-hubs/bridge-hub-rococo/Cargo.toml        |   2 +
 .../bridge-hubs/bridge-hub-westend/Cargo.toml       |   2 +
 .../runtimes/bridge-hubs/common/Cargo.toml          |   2 +
 .../bridge-hubs/test-utils/Cargo.toml               |   2 +
 .../collectives-westend/Cargo.toml                  |   2 +
 .../parachains/runtimes/constants/Cargo.toml        |   2 +
 .../contracts/contracts-rococo/Cargo.toml           |   2 +
 .../coretime/coretime-rococo/Cargo.toml             |   2 +
 .../coretime/coretime-westend/Cargo.toml            |   2 +
 .../glutton/glutton-westend/Cargo.toml              |   2 +
 .../runtimes/people/people-rococo/Cargo.toml        |   2 +
 .../runtimes/people/people-westend/Cargo.toml       |   2 +
 .../parachains/runtimes/test-utils/Cargo.toml       |   2 +
 .../testing/rococo-parachain/Cargo.toml             |   2 +
 cumulus/polkadot-omni-node/Cargo.toml               |   2 +
 cumulus/polkadot-omni-node/lib/Cargo.toml           |   2 +
 cumulus/polkadot-parachain/Cargo.toml               |   2 +
 cumulus/primitives/aura/Cargo.toml                  |   2 +
 cumulus/primitives/core/Cargo.toml                  |   2 +
 .../primitives/parachain-inherent/Cargo.toml        |   2 +
 .../proof-size-hostfunction/Cargo.toml              |   2 +
 .../storage-weight-reclaim/Cargo.toml               |   2 +
 cumulus/primitives/timestamp/Cargo.toml             |   2 +
 cumulus/primitives/utility/Cargo.toml               |   2 +
 cumulus/test/relay-sproof-builder/Cargo.toml        |   2 +
 cumulus/xcm/xcm-emulator/Cargo.toml                 |   2 +
 polkadot/Cargo.toml                                 |   2 +
 polkadot/cli/Cargo.toml                             |   2 +
 polkadot/core-primitives/Cargo.toml                 |   2 +
 polkadot/erasure-coding/Cargo.toml                  |   2 +
 polkadot/node/collation-generation/Cargo.toml       |   2 +
 .../core/approval-voting-parallel/Cargo.toml        |   2 +
 polkadot/node/core/approval-voting/Cargo.toml       |   2 +
 polkadot/node/core/av-store/Cargo.toml              |   2 +
 polkadot/node/core/backing/Cargo.toml               |   2 +
 .../node/core/bitfield-signing/Cargo.toml           |   2 +
 .../node/core/candidate-validation/Cargo.toml       |   2 +
 polkadot/node/core/chain-api/Cargo.toml             |   2 +
 polkadot/node/core/chain-selection/Cargo.toml       |   2 +
 .../node/core/dispute-coordinator/Cargo.toml        |   2 +
 .../node/core/parachains-inherent/Cargo.toml        |   2 +
 .../core/prospective-parachains/Cargo.toml          |   2 +
 polkadot/node/core/provisioner/Cargo.toml           |   2 +
 polkadot/node/core/pvf-checker/Cargo.toml           |   2 +
 polkadot/node/core/pvf/Cargo.toml                   |   2 +
 polkadot/node/core/pvf/common/Cargo.toml            |   2 +
 .../node/core/pvf/execute-worker/Cargo.toml         |   2 +
 .../node/core/pvf/prepare-worker/Cargo.toml         |   2 +
 polkadot/node/core/runtime-api/Cargo.toml           |   2 +
 polkadot/node/gum/Cargo.toml                        |   2 +
 polkadot/node/gum/proc-macro/Cargo.toml             |   2 +
 polkadot/node/metrics/Cargo.toml                    |   2 +
 .../network/approval-distribution/Cargo.toml        |   2 +
 .../availability-distribution/Cargo.toml            |   2 +
 .../network/availability-recovery/Cargo.toml        |   2 +
 .../network/bitfield-distribution/Cargo.toml        |   2 +
 polkadot/node/network/bridge/Cargo.toml             |   2 +
 .../node/network/collator-protocol/Cargo.toml       |   2 +
 .../network/dispute-distribution/Cargo.toml         |   2 +
 .../node/network/gossip-support/Cargo.toml          |   2 +
 polkadot/node/network/protocol/Cargo.toml           |   2 +
 .../network/statement-distribution/Cargo.toml       |   2 +
 polkadot/node/overseer/Cargo.toml                   |   2 +
 polkadot/node/primitives/Cargo.toml                 |   2 +
 polkadot/node/service/Cargo.toml                    |   2 +
 polkadot/node/subsystem-types/Cargo.toml            |   2 +
 polkadot/node/subsystem-util/Cargo.toml             |   2 +
 polkadot/node/subsystem/Cargo.toml                  |   2 +
 polkadot/node/tracking-allocator/Cargo.toml         |   2 +
 polkadot/parachain/Cargo.toml                       |   2 +
 polkadot/primitives/Cargo.toml                      |   2 +
 polkadot/rpc/Cargo.toml                             |   2 +
 polkadot/runtime/common/Cargo.toml                  |   2 +
 .../common/slot_range_helper/Cargo.toml             |   2 +
 polkadot/runtime/metrics/Cargo.toml                 |   2 +
 polkadot/runtime/parachains/Cargo.toml              |   2 +
 polkadot/runtime/rococo/Cargo.toml                  |   2 +
 polkadot/runtime/rococo/constants/Cargo.toml        |   2 +
 polkadot/runtime/westend/Cargo.toml                 |   2 +
 polkadot/runtime/westend/constants/Cargo.toml       |   2 +
 polkadot/statement-table/Cargo.toml                 |   2 +
 polkadot/utils/generate-bags/Cargo.toml             |   2 +
 polkadot/xcm/Cargo.toml                             |   2 +
 polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml       |   2 +
 polkadot/xcm/pallet-xcm/Cargo.toml                  |   2 +
 polkadot/xcm/procedural/Cargo.toml                  |   2 +
 polkadot/xcm/xcm-builder/Cargo.toml                 |   2 +
 polkadot/xcm/xcm-executor/Cargo.toml                |   2 +
 polkadot/xcm/xcm-simulator/Cargo.toml               |   2 +
 polkadot/xcm/xcm-simulator/example/Cargo.toml       |   2 +
 prdoc/pr_6549.prdoc                                 | 247 ++++++++++++++++++
 scripts/generate-umbrella.py                        |   2 +
 substrate/frame/revive/fixtures/Cargo.toml          |   2 +
 umbrella/Cargo.toml                                 |   6 +
 127 files changed, 501 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_6549.prdoc

diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index 8d77b6a31b75..e9bedd16e6d1 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -11,7 +11,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  TOOLCHAIN: nightly-2024-10-19
+  TOOLCHAIN: nightly-2024-11-19

 jobs:
   preflight:
@@ -74,7 +74,7 @@ jobs:
       - name: install parity-publish
         # Set the target dir to cache the build.
-        run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.1 --locked -q
+        run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.2 --locked -q

       - name: check semver
         run: |

diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml
index 3fad3b641474..1e5a8054e2c7 100644
--- a/.github/workflows/publish-check-crates.yml
+++ b/.github/workflows/publish-check-crates.yml
@@ -24,7 +24,7 @@ jobs:
           cache-on-failure: true

       - name: install parity-publish
-        run: cargo install parity-publish@0.8.0 --locked -q
+        run: cargo install parity-publish@0.10.2 --locked -q

       - name: parity-publish check
         run: parity-publish --color always check --allow-unpublished

diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml
index 37bf06bb82d8..845b57a61b96 100644
--- a/.github/workflows/publish-claim-crates.yml
+++ b/.github/workflows/publish-claim-crates.yml
@@ -18,7 +18,7 @@ jobs:
           cache-on-failure: true

       - name: install parity-publish
-        run: cargo install parity-publish@0.8.0 --locked -q
+        run: cargo install parity-publish@0.10.2 --locked -q

       - name: parity-publish claim
         env:

diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml
index 6f8e586bf5ff..9f47f158ed4a 100644
--- a/bridges/snowbridge/runtime/test-common/Cargo.toml
+++ b/bridges/snowbridge/runtime/test-common/Cargo.toml
@@ -6,6 +6,8 @@ authors = ["Snowfork "]
 edition.workspace = true
 license = "Apache-2.0"
 categories = ["cryptography::cryptocurrencies"]
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml
index 9b6f6b73960b..198f9428f1dd 100644
--- a/cumulus/client/cli/Cargo.toml
+++ b/cumulus/client/cli/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Parachain node CLI utilities."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml
index 6ebde0c2c653..83a3f2661e7a 100644
--- a/cumulus/client/collator/Cargo.toml
+++ b/cumulus/client/collator/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Common node-side functionality and glue code to collate parachain blocks."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml
index 0bb2de6bb9b8..6e0c124591cb 100644
--- a/cumulus/client/consensus/aura/Cargo.toml
+++ b/cumulus/client/consensus/aura/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml
index 4bc2f1d1e600..0f532a2101c4 100644
--- a/cumulus/client/consensus/common/Cargo.toml
+++ b/cumulus/client/consensus/common/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml
index bb760ae03f4d..e391481bc445 100644
--- a/cumulus/client/consensus/proposer/Cargo.toml
+++ b/cumulus/client/consensus/proposer/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml
index f3ee6fc2f7d2..7f0f4333c961 100644
--- a/cumulus/client/consensus/relay-chain/Cargo.toml
+++ b/cumulus/client/consensus/relay-chain/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 authors.workspace = true
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml
index bc67678eedeb..b78df8d73eae 100644
--- a/cumulus/client/network/Cargo.toml
+++ b/cumulus/client/network/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 description = "Cumulus-specific networking protocol"
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml
index 0d82cf648743..4f53e2bc1bc2 100644
--- a/cumulus/client/parachain-inherent/Cargo.toml
+++ b/cumulus/client/parachain-inherent/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof."
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true

 [dependencies]
 async-trait = { workspace = true }

diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml
index 3127dd26fcaa..762837e0bb11 100644
--- a/cumulus/client/pov-recovery/Cargo.toml
+++ b/cumulus/client/pov-recovery/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 description = "Parachain PoV recovery"
 edition.workspace = true
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
index 6f1b74191be7..9e6e8da929bb 100644
--- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml
index a496fab050dd..2b9e72bbeca6 100644
--- a/cumulus/client/relay-chain-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-interface/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Common interface for different relay chain datasources."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml
index 95ecadc8bd06..0fad188bb1ab 100644
--- a/cumulus/client/relay-chain-minimal-node/Cargo.toml
+++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Minimal node implementation to be used in tandem with RPC or light-client mode."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml
index fb4cb4ceed4e..162f5ad0e9e8 100644
--- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml
@@ -5,6 +5,8 @@ version = "0.7.0"
 edition.workspace = true
 description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml
index 0a77b465d96a..193283648f19 100644
--- a/cumulus/client/service/Cargo.toml
+++ b/cumulus/client/service/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Common functions used to assemble the components of a parachain node."
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml
index c08148928b7c..fcda79f1d5c1 100644
--- a/cumulus/pallets/aura-ext/Cargo.toml
+++ b/cumulus/pallets/aura-ext/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "AURA consensus extension pallet for parachains"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml
index 3cb0394c4b95..05498a474e42 100644
--- a/cumulus/pallets/parachain-system/Cargo.toml
+++ b/cumulus/pallets/parachain-system/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Base pallet for cumulus-based parachains"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
index da6f0fd03efb..629818f9c4cc 100644
--- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
+++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Proc macros provided by the parachain-system pallet"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml
index 5fd1939e93a0..2088361bf11a 100644
--- a/cumulus/pallets/solo-to-para/Cargo.toml
+++ b/cumulus/pallets/solo-to-para/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Adds functionality to migrate from a Solo to a Parachain"
 license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml
index 35d7a083b061..ff9be866d48f 100644
--- a/cumulus/pallets/xcm/Cargo.toml
+++ b/cumulus/pallets/xcm/Cargo.toml
@@ -5,6 +5,8 @@ name = "cumulus-pallet-xcm"
 version = "0.7.0"
 license = "Apache-2.0"
 description = "Pallet for stuff specific to parachains' usage of XCM"
+homepage.workspace = true
+repository.workspace = true

 [lints]
 workspace = true

diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml
index 9c7470eda6da..af70a3169d8e 100644
--- a/cumulus/pallets/xcmp-queue/Cargo.toml
+++ b/cumulus/pallets/xcmp-queue/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Pallet to queue outbound and inbound XCMP messages."
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 6d436bdf799a..641693a6a01b 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 23edaf6bfe65..8282d12d317f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index c52021f67e36..09301bd738f3 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Parity Technologies "] edition.workspace = true description = "Managed content" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index e0bed23c4f8c..604441c65f29 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -5,6 +5,8 @@ name = "staging-parachain-info" version = "0.7.0" license = "Apache-2.0" description = "Pallet to store the parachain ID" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 51fc384a4f14..ceb38f39fd80 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-ping" version = "0.7.0" license = "Apache-2.0" description = "Ping Pallet for Cumulus XCM/UMP testing." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index bfe8ed869758..949640dd4be6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index a3eaebb59153..8e47146a06c3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index fb66f0de2322..fa9efbca7a39 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Assets common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index f6b3c13e8102..393d06f95b15 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Test utils for Asset Hub runtimes." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 3eb06e3a18c1..a7710783a1e0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 871bf44ec5b2..91900c830ba6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index 9cb24a2b2820..76a89bcb2e72 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Bridge hub common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [dependencies] codec = { features = ["derive"], workspace = true } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 915b3090092f..16fef951f328 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 810abcf572d4..dc4b73db69e3 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml index d54f1e7db6c1..01b023e0fb89 100644 --- a/cumulus/parachains/runtimes/constants/Cargo.toml +++ b/cumulus/parachains/runtimes/constants/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Common constants for Testnet Parachains runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index 
c98ca7ba3e74..1aeff5eb2e48 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -5,6 +5,8 @@ description = "Parachain testnet runtime for FRAME Contracts pallet." authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 02807827cf92..ab621134b252 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 34353d312b1f..44dfbf93c30e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 09b4ef679d24..9bbdb8d2ee08 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Glutton parachain runtime." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index a55143b62071..893133bf3c1a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 4d66332e96dd..66b324b51af4 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index e9d666617ee2..17c81ae4921a 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for Runtimes testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index b0581c8d43ff..4713f4398eaa 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-omni-node/Cargo.toml b/cumulus/polkadot-omni-node/Cargo.toml index a736e1ef80c5..8b46bc882868 100644 --- a/cumulus/polkadot-omni-node/Cargo.toml +++ b/cumulus/polkadot-omni-node/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true build = "build.rs" description = "Generic binary that can run a parachain node with u32 block number and Aura consensus" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index a690229f1695..cca4ac3b2b69 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Helper library that can be used to build a parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 5520126d0742..f5ce040bb530 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -6,6 +6,8 @@ 
edition.workspace = true build = "build.rs" description = "Runs a polkadot parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 185b2d40833f..715ce3e1a03e 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 533d368d3b00..b5bfe4fbc889 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Cumulus related core primitive types and traits" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index a4271d3fd9cc..2ff990b8d514 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index e61c865d05fb..6e8168091892 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Hostfunction exposing storage proof size to the runtime." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index e1ae6743335a..3c358bc25edb 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utilities to reclaim storage weight." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index cb328e2f2cc6..70cb3e607b98 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Provides timestamp related functionality for parachains." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 2ca8b82001d5..1444571edbe0 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Helper datatypes for Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index e266b5807081..c1efa141a45d 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 8598481fae76..d0c637d64d01 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -5,6 +5,8 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 3a939464868f..101caac0e313 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -20,6 +20,8 @@ authors.workspace = true edition.workspace = true version = "6.0.0" default-run = "polkadot" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index da37f6062c57..3eff525b7b1e 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 42ca27953738..33869f216f78 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -5,6 +5,8 @@ description = "Core Polkadot types used by Relay Chains and parachains." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 969742c5bb0a..528b955c4db3 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -5,6 +5,8 @@ description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 777458673f5b..c1716e2e6eb8 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting-parallel/Cargo.toml b/polkadot/node/core/approval-voting-parallel/Cargo.toml index 3a98cce80e92..995687fb4c11 100644 --- a/polkadot/node/core/approval-voting-parallel/Cargo.toml +++ b/polkadot/node/core/approval-voting-parallel/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem running approval work in parallel" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index f9754d2babc9..80f5dcb7f318 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 1d14e4cfba37..9f6864269cef 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index cd1acf9daa93..a81fe9486c63 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 126a18a14166..f00ba5712661 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Bitfield signing subsystem for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 87855dbce415..fea16b1c7604 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index a8e911e0c5c9..0f443868dada 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 755d5cadeaaf..d2cc425a4816 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 344b66af1933..11b4ac645c23 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -5,6 +5,8 @@ description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 1e4953f40d0b..b1cd5e971b00 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Parachains inherent data provider for Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5629e4ef7fbe..ced6c30c64b6 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 64a598b420f7..26dca1adbc79 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -5,6 +5,8 @@ description = "Responsible for assembling a relay chain block from a set of avai authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 73ef17a2843a..cb7e3eadcf0a 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 37d5878ea597..1b2a16ae8b55 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 903c8dd1af29..d058d582fc26 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 6ad340d25612..8327cf8058cd 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 56235bd82192..9dc800a8ef56 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 834e4b300b9e..15cbf4665d06 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -5,6 +5,8 @@ description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 9b2df435a06a..84875ea121b6 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" +homepage.workspace 
= true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index da6364977cae..b4a3401b15e4 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 41b08b66e9b4..05344993a75e 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 8d674a733470..abf345552f89 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Approval Distribution subsystem for the distribution of authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 8c5574f244e4..e87103d99f72 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 41f09b1f7044..be4323e74f02 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 6d007255c574..2ff30489b6c1 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Bitfiled Distribution subsystem, which gossips signed av authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index b4b5743853cd..c4b46c1dc001 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -5,6 +5,8 @@ description = "The Network Bridge Subsystem — protocol multiplexer for Polkado authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git 
a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 304cb23bb6aa..a51d24c70807 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Collator Protocol subsystem. Allows collators and valida authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index b4dcafe09eb6..4f2f9ccadf8b 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Dispute Distribution subsystem, which ensures all concer authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index c8c19e5de070..7d17ea45eab9 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Gossip Support subsystem. Responsible for keeping track authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 3d51d3c0a565..0bcf224332bc 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Primitives types for the Node-side" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index de07937ffb0a..d737c7bf8968 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 2253a5ae0c66..62634c1da090 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "System overseer of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 7185205f905b..50ee3a80ddb8 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 6e8eade21a43..7f58a56d5d16 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -6,6 +6,8 @@ 
authors.workspace = true edition.workspace = true license.workspace = true description = "Utils to tie different Polkadot components together and allow instantiation of a node." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index b5686ec96be1..44bb7036d63d 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index d12daa572055..9c21fede1c47 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index ce4bceec7336..4f30d3ce9c09 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index d98377e53759..0fbf526ccb8b 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -5,6 +5,8 @@ version = "2.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 9d0518fd46ad..ea6c4423dc19 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "6.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index dd269caa2d60..150aaf153fa7 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Shared primitives used by Polkadot runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index d01528d4dee0..48980dde4bbc 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Polkadot specific RPC functionality." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 01b56b31cf20..1646db54455a 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -5,6 +5,8 @@ description = "Pallets and constants used in Relay Chain networks." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 02810b75283f..3f110bdd76c6 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Helper crate for generating slot ranges for the Polkadot runtime." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 3709e1eb697e..0415e4754009 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Runtime metric interface for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index a3eec3f9d961..b01778eeb424 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -5,6 +5,8 @@ description = "Relay Chain runtime code responsible for Parachains." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3b11c977edf3..764c53abbfcb 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -6,6 +6,8 @@ description = "Rococo testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 1d0adac44af4..921bc8f5fe92 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Rococo network." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index f94301baab09..584f5855b7a4 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -6,6 +6,8 @@ description = "Westend testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 27d5b19b8e77..a50e2f9cc639 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Westend network." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 53ea0b74463b..d9519dafe12d 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stores messages other authorities issue about candidates in Polkadot." +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 16205b0f51f5..3006d8325ef9 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 86c7067ad6fa..113e72c27ae1 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index b07bdfdca3d1..fe2b78163223 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -5,6 +5,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" description = "Benchmarks for the XCM pallet" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 4d44d75e34dd..e8cdd3b4931b 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -5,6 +5,8 @@ description = "A pallet for handling XCM programs." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 83b35d19cf7e..3167766158ff 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" publish = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index eaa115740f3e..2819a0b0a555 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index cc966f91fe4d..20ca40de5faa 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index c7caa49393ed..47900e226d48 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index e0aff9b7782a..6fbe9243944a 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc new file mode 100644 index 000000000000..61a64c724185 --- /dev/null +++ b/prdoc/pr_6549.prdoc @@ -0,0 +1,247 @@ +doc: [] + +crates: + - name: polkadot-sdk + bump: none + - name: asset-test-utils + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-pallet-parachain-system-proc-macro + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: staging-xcm + bump: none + - name: xcm-procedural + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: cumulus-primitives-proof-size-hostfunction + bump: none + - name: polkadot-runtime-common + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: polkadot-runtime-metrics + bump: none + - name: staging-xcm-executor + bump: none + - name: slot-range-helper + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-xcm + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: cumulus-primitives-aura + bump: none + - name: staging-parachain-info + bump: none + - name: cumulus-test-relay-sproof-builder + bump: none + - name: 
cumulus-client-cli + bump: none + - name: cumulus-client-collator + bump: none + - name: cumulus-client-consensus-common + bump: none + - name: cumulus-client-pov-recovery + bump: none + - name: cumulus-relay-chain-interface + bump: none + - name: polkadot-overseer + bump: none + - name: tracing-gum + bump: none + - name: tracing-gum-proc-macro + bump: none + - name: polkadot-node-metrics + bump: none + - name: polkadot-node-primitives + bump: none + - name: polkadot-erasure-coding + bump: none + - name: polkadot-node-subsystem + bump: none + - name: polkadot-node-subsystem-types + bump: none + - name: polkadot-node-network-protocol + bump: none + - name: polkadot-statement-table + bump: none + - name: polkadot-rpc + bump: none + - name: polkadot-service + bump: none + - name: cumulus-client-parachain-inherent + bump: none + - name: westend-runtime + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: westend-runtime-constants + bump: none + - name: polkadot-approval-distribution + bump: none + - name: polkadot-node-subsystem-util + bump: none + - name: polkadot-availability-bitfield-distribution + bump: none + - name: polkadot-availability-distribution + bump: none + - name: polkadot-availability-recovery + bump: none + - name: polkadot-node-core-approval-voting + bump: none + - name: polkadot-node-core-approval-voting-parallel + bump: none + - name: polkadot-node-core-av-store + bump: none + - name: polkadot-node-core-chain-api + bump: none + - name: polkadot-statement-distribution + bump: none + - name: polkadot-collator-protocol + bump: none + - name: polkadot-dispute-distribution + bump: none + - name: polkadot-gossip-support + bump: none + - name: polkadot-network-bridge + bump: none + - name: polkadot-node-collation-generation + bump: none + - name: polkadot-node-core-backing + bump: none + - name: polkadot-node-core-bitfield-signing + bump: none + - name: polkadot-node-core-candidate-validation + bump: none + - name: polkadot-node-core-pvf + bump: none + - name: polkadot-node-core-pvf-common + bump: none + - name: polkadot-node-core-pvf-execute-worker + bump: none + - name: polkadot-node-core-pvf-prepare-worker + bump: none + - name: staging-tracking-allocator + bump: none + - name: rococo-runtime + bump: none + - name: rococo-runtime-constants + bump: none + - name: polkadot-node-core-chain-selection + bump: none + - name: polkadot-node-core-dispute-coordinator + bump: none + - name: polkadot-node-core-parachains-inherent + bump: none + - name: polkadot-node-core-prospective-parachains + bump: none + - name: polkadot-node-core-provisioner + bump: none + - name: polkadot-node-core-pvf-checker + bump: none + - name: polkadot-node-core-runtime-api + bump: none + - name: cumulus-client-network + bump: none + - name: cumulus-relay-chain-inprocess-interface + bump: none + - name: polkadot-cli + bump: none + - name: cumulus-client-consensus-aura + bump: none + - name: cumulus-client-consensus-proposer + bump: none + - name: cumulus-client-consensus-relay-chain + bump: none + - name: cumulus-client-service + bump: none + - name: cumulus-relay-chain-minimal-node + bump: none + - name: cumulus-relay-chain-rpc-interface + bump: none + - name: parachains-common + bump: none + - name: cumulus-primitives-utility + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: parachains-runtimes-test-utils + bump: none + - name: assets-common + bump: none + - name: bridge-hub-common + bump: none + - name: bridge-hub-test-utils + bump: none + - name: 
cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: cumulus-primitives-timestamp + bump: none + - name: emulated-integration-tests-common + bump: none + - name: xcm-emulator + bump: none + - name: pallet-collective-content + bump: none + - name: xcm-simulator + bump: none + - name: pallet-revive-fixtures + bump: none + - name: polkadot-omni-node-lib + bump: none + - name: snowbridge-runtime-test-common + bump: none + - name: testnet-parachains-constants + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: polkadot-omni-node + bump: none + - name: polkadot-parachain-bin + bump: none + - name: polkadot + bump: none + - name: polkadot-voter-bags + bump: none + - name: xcm-simulator-example + bump: none diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py index 8326909c3449..ae3873180553 100644 --- a/scripts/generate-umbrella.py +++ b/scripts/generate-umbrella.py @@ -120,6 +120,8 @@ def main(path, version): "edition": { "workspace": True }, "authors": { "workspace": True }, "description": "Polkadot SDK umbrella crate.", + "homepage": { "workspace": True }, + "repository": { "workspace": True }, "license": "Apache-2.0", "metadata": { "docs": { "rs": { "features": ["runtime-full", "node"], diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 798ed8c75a5a..9fd434db6179 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Fixtures for testing and benchmarking" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 7f50658c4e16..9affcffd2ade 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -617,6 +617,12 @@ workspace = true [package.authors] workspace = true +[package.homepage] +workspace = true + +[package.repository] +workspace = true + [dependencies.assets-common] path = "../cumulus/parachains/runtimes/assets/common" default-features = false From c56a98b991e2cdce7419813886a74d5280b66d2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Tue, 3 Dec 2024 13:44:52 +0100 Subject: [PATCH 18/29] pallet-revive-fixtures: Try not to re-create fixture dir (#6735) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On some systems trying to re-create the output directory will lead to an error. 
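The change below therefore creates the directory only when it is genuinely missing, instead of unconditionally calling `create_dir_all` on an existing path. As a standalone illustration of the pattern (the helper name and example path are ours, not the crate's):

```rust
use std::{fs, io, path::Path};

/// Ensure `out_dir` ends up as a real directory without re-creating it when it
/// already exists (the re-creation is what fails on some systems).
fn ensure_out_dir(out_dir: &Path) -> io::Result<()> {
	let mut exists = out_dir.exists();
	// A leftover symlink (or plain file) from an older version of the script:
	// remove it so a real directory can take its place.
	if exists && !out_dir.is_dir() {
		fs::remove_file(out_dir)?;
		exists = false;
	}
	// Create the directory only if it is genuinely missing.
	if !exists {
		fs::create_dir(out_dir)?;
	}
	Ok(())
}

fn main() -> io::Result<()> {
	// Example path, for illustration only.
	ensure_out_dir(Path::new("target/pallet-revive-fixtures"))
}
```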
Fixes https://github.com/paritytech/subxt/issues/1876

---------

Co-authored-by: Bastian Köcher
---
 substrate/frame/revive/fixtures/build.rs | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs
index 46cd5760ca4e..eca547bc6ddd 100644
--- a/substrate/frame/revive/fixtures/build.rs
+++ b/substrate/frame/revive/fixtures/build.rs
@@ -204,10 +204,15 @@ fn create_out_dir() -> Result<PathBuf> {
 		.join("pallet-revive-fixtures");
 
 	// clean up some leftover symlink from previous versions of this script
-	if out_dir.exists() && !out_dir.is_dir() {
+	let mut out_exists = out_dir.exists();
+	if out_exists && !out_dir.is_dir() {
 		fs::remove_file(&out_dir)?;
+		out_exists = false;
+	}
+
+	if !out_exists {
+		fs::create_dir(&out_dir).context("Failed to create output directory")?;
 	}
-	fs::create_dir_all(&out_dir).context("Failed to create output directory")?;
 
 	// write the location of the out dir so it can be found later
 	let mut file = fs::File::create(temp_dir.join("fixture_location.rs"))

From d1d92ab76004ce349a97fc5d325eaf9a4a7101b7 Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Tue, 3 Dec 2024 13:45:35 +0100
Subject: [PATCH 19/29] Bump Westend AH (#6583)

Bump Asset-Hub westend spec version

---------

Co-authored-by: GitHub Action
---
 .../runtimes/assets/asset-hub-westend/src/lib.rs | 2 +-
 prdoc/pr_6583.prdoc                              | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_6583.prdoc

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 98d647d868db..21368e9c2b4b 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -124,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: alloc::borrow::Cow::Borrowed("westmint"),
 	impl_name: alloc::borrow::Cow::Borrowed("westmint"),
 	authoring_version: 1,
-	spec_version: 1_016_008,
+	spec_version: 1_017_002,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 16,
diff --git a/prdoc/pr_6583.prdoc b/prdoc/pr_6583.prdoc
new file mode 100644
index 000000000000..0e67ed33e27c
--- /dev/null
+++ b/prdoc/pr_6583.prdoc
@@ -0,0 +1,7 @@
+title: Bump Westend AH
+doc:
+- audience: Runtime Dev
+  description: Bump Asset-Hub westend spec version
+crates:
+- name: asset-hub-westend-runtime
+  bump: minor

From 896c81440c1dd169bd2f5e65aba46eca228609f8 Mon Sep 17 00:00:00 2001
From: Lulu
Date: Tue, 3 Dec 2024 14:18:05 +0100
Subject: [PATCH 20/29] Add publish-check-compile workflow (#6556)

Add publish-check-compile workflow

This applies staged prdocs, then configures crate dependencies to pull from
crates.io for our already-published crates and from local paths for the crates
that are about to be published, then runs cargo check on the result. This
results in a build state consistent with that of publish time and should catch
compile errors that we would otherwise have run into mid-publish.

This acts as a supplement to the check-semver job. check-semver works on a high
level and judges which changes are incorrect and why. This job just runs the
change, sees if it compiles, and if not spits out a compile error.
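Reviewers can reproduce the check locally; the following shell sketch mirrors the steps of the workflow added below (it assumes a polkadot-sdk checkout and the same pinned parity-publish version):

```bash
# Install the pinned release of the publishing tool.
cargo install parity-publish@0.10.2 --locked -q

# Apply staged prdocs and compute the publish plan (semver checks are skipped
# here; the separate check-semver job covers those).
parity-publish --color always plan --skip-check --prdoc prdoc/

# Rewrite dependencies: crates.io for already-published crates, local paths
# for crates that are about to be published.
parity-publish --color always apply --registry

# Compile-check exactly the set of crates the plan would publish.
packages="$(parity-publish apply --print)"
if [ -n "$packages" ]; then
  cargo --color always check $(printf -- '-p %s ' $packages)
fi
```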
---
 .github/workflows/publish-check-compile.yml | 48 +++++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 .github/workflows/publish-check-compile.yml

diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml
new file mode 100644
index 000000000000..83cd3ff8fa90
--- /dev/null
+++ b/.github/workflows/publish-check-compile.yml
@@ -0,0 +1,48 @@
+name: Check publish build
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+  merge_group:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  preflight:
+    uses: ./.github/workflows/reusable-preflight.yml
+
+  check-publish:
+    timeout-minutes: 90
+    needs: [preflight]
+    runs-on: ${{ needs.preflight.outputs.RUNNER }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
+
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
+        with:
+          cache-on-failure: true
+
+      - name: install parity-publish
+        run: cargo install parity-publish@0.10.2 --locked -q
+
+      - name: parity-publish update plan
+        run: parity-publish --color always plan --skip-check --prdoc prdoc/
+
+      - name: parity-publish apply plan
+        run: parity-publish --color always apply --registry
+
+      - name: parity-publish check compile
+        run: |
+          packages="$(parity-publish apply --print)"
+
+          if [ -n "$packages" ]; then
+            cargo --color always check $(printf -- '-p %s ' $packages)
+          fi

From 41a5d8ec5f3d3d0ff82899be66113b223395ade5 Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Tue, 3 Dec 2024 18:02:03 +0100
Subject: [PATCH 21/29] `fatxpool`: handling limits and priorities improvements (#6405)

This PR provides a number of improvements around handling limits and
priorities in the fork-aware transaction pool.

#### Notes to reviewers

Following are the notable changes:

1. #### [Better support](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/414ec3ccad154c9a2aab0586bfa2d2c884fd140f) for `Usurped` transactions

   When any view reports an `Usurped` transaction (replaced by another transaction with higher priority), it is removed from all the views (also inactive ones). Removal is implemented by simply submitting the usurping transaction to all the views. It is also ensured that a usurped tx will not sneak into the `view_store` in a newly created view (this is why `ViewStore::pending_txs_replacements` was added).

1. #### [`TimedTransactionSource`](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/f10590f3bde69b31250761a5b10802fb139ab2b2) introduced

   Every view now records when a transaction entered the pool. Limit enforcement (currently only for future txs) uses this timestamp to find the worst transactions. Having a common timestamp ensures a coherent assessment of a transaction's importance across different views. This could also later be used to select which ready transaction shall be dropped.

1. #### `DroppedWatcher`: [improved logic](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/560db28c987dd1e634119788ebc8318967df206b) for future transactions

   For a future transaction, if the last referencing view is removed, the transaction will be dropped from the pool. This prevents future (unincluded and un-promoted) transactions from staying in the pool for a long time.

#### And some minor changes:

1. 
[simplified](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/2d0bbf83e2df2b4c641ef84c1188907c4bfad3c6) the flow in `update_view_with_mempool` (code duplication + minor bug fix). 2. `graph::BasePool`: [handling priorities](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/c9f2d39355853d034fdbc6ea31e4e0e5bf34cb6a) for future transaction improved (previously transaction with lower prio was reported as failed), 3. `graph::listener`: dedicated `limit_enforced`/`usurped`/`dropped` [calls added](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/7b58a68cccfcf372321ea41826fbe9d4222829cf), 4. flaky test [fixed](https://github.com/paritytech/polkadot-sdk/pull/6405/commits/e0a7bc6c048245943796839b166505e2aecdbd7d) 5. new tests added, related to: #5809 --------- Co-authored-by: GitHub Action Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com> --- prdoc/pr_6405.prdoc | 9 + .../client/transaction-pool/benches/basics.rs | 4 +- .../src/fork_aware_txpool/dropped_watcher.rs | 291 +++++++++++++----- .../fork_aware_txpool/fork_aware_txpool.rs | 199 +++++++----- .../import_notification_sink.rs | 19 +- .../fork_aware_txpool/multi_view_listener.rs | 38 ++- .../fork_aware_txpool/revalidation_worker.rs | 9 +- .../src/fork_aware_txpool/tx_mem_pool.rs | 88 ++++-- .../src/fork_aware_txpool/view.rs | 31 +- .../src/fork_aware_txpool/view_store.rs | 262 ++++++++++++++-- .../transaction-pool/src/graph/base_pool.rs | 159 +++++++++- .../transaction-pool/src/graph/listener.rs | 47 ++- .../client/transaction-pool/src/graph/pool.rs | 30 +- .../transaction-pool/src/graph/ready.rs | 5 +- .../transaction-pool/src/graph/rotator.rs | 5 +- .../src/graph/validated_pool.rs | 27 +- .../transaction-pool/src/graph/watcher.rs | 6 + substrate/client/transaction-pool/src/lib.rs | 5 +- .../src/single_state_txpool/revalidation.rs | 25 +- .../single_state_txpool.rs | 46 ++- .../client/transaction-pool/tests/fatp.rs | 14 +- .../transaction-pool/tests/fatp_common/mod.rs | 14 + .../transaction-pool/tests/fatp_limits.rs | 189 ++++++++++++ .../transaction-pool/tests/fatp_prios.rs | 249 +++++++++++++++ .../client/transaction-pool/tests/pool.rs | 28 +- 25 files changed, 1420 insertions(+), 379 deletions(-) create mode 100644 prdoc/pr_6405.prdoc create mode 100644 substrate/client/transaction-pool/tests/fatp_prios.rs diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc new file mode 100644 index 000000000000..9e4e0b3c6c20 --- /dev/null +++ b/prdoc/pr_6405.prdoc @@ -0,0 +1,9 @@ +title: '`fatxpool`: handling limits and priorities improvements' +doc: +- audience: Node Dev + description: |- + This PR provides a number of improvements and fixes around handling limits and priorities in the fork-aware transaction pool. 
+ +crates: +- name: sc-transaction-pool + bump: major diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 0d8c1cbba9b4..5e40b0fb72d6 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -152,7 +152,7 @@ fn uxt(transfer: TransferData) -> Extrinsic { } fn bench_configured(pool: Pool, number: u64, api: Arc) { - let source = TransactionSource::External; + let source = TimedTransactionSource::new_external(false); let mut futures = Vec::new(); let mut tags = Vec::new(); let at = HashAndNumber { @@ -171,7 +171,7 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) { tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&at, source, xt)); + futures.push(pool.submit_one(&at, source.clone(), xt)); } let res = block_on(futures::future::join_all(futures.into_iter())); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index ecae21395c91..7679e3b169d2 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -24,7 +24,7 @@ use crate::{ common::log_xt::log_xt_trace, fork_aware_txpool::stream_map_util::next_event, - graph::{BlockHash, ChainApi, ExtrinsicHash}, + graph::{self, BlockHash, ExtrinsicHash}, LOG_TARGET, }; use futures::stream::StreamExt; @@ -33,12 +33,44 @@ use sc_transaction_pool_api::TransactionStatus; use sc_utils::mpsc; use sp_runtime::traits::Block as BlockT; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{ + hash_map::{Entry, OccupiedEntry}, + HashMap, HashSet, + }, fmt::{self, Debug, Formatter}, pin::Pin, }; use tokio_stream::StreamMap; +/// Represents a transaction that was removed from the transaction pool, including the reason of its +/// removal. +#[derive(Debug, PartialEq)] +pub struct DroppedTransaction { + /// Hash of the dropped extrinsic. + pub tx_hash: Hash, + /// Reason of the transaction being dropped. + pub reason: DroppedReason, +} + +impl DroppedTransaction { + fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + Self { reason: DroppedReason::Usurped(by), tx_hash } + } + + fn new_enforced_by_limts(tx_hash: Hash) -> Self { + Self { reason: DroppedReason::LimitsEnforced, tx_hash } + } +} + +/// Provides reason of why transactions was dropped. +#[derive(Debug, PartialEq)] +pub enum DroppedReason { + /// Transaction was replaced by other transaction (e.g. because of higher priority). + Usurped(Hash), + /// Transaction was dropped because of internal pool limits being enforced. + LimitsEnforced, +} + /// Dropped-logic related event from the single view. pub type ViewStreamEvent = crate::graph::DroppedByLimitsEvent, BlockHash>; @@ -47,7 +79,8 @@ type ViewStream = Pin> + Se /// Stream of extrinsic hashes that were dropped by the views and have no references by existing /// views. -pub(crate) type StreamOfDropped = Pin> + Send>>; +pub(crate) type StreamOfDropped = + Pin>> + Send>>; /// A type alias for a sender used as the controller of the [`MultiViewDropWatcherContext`]. 
/// Used to send control commands from the [`MultiViewDroppedWatcherController`] to @@ -59,24 +92,24 @@ type Controller = mpsc::TracingUnboundedSender; type CommandReceiver = mpsc::TracingUnboundedReceiver; /// Commands to control the instance of dropped transactions stream [`StreamOfDropped`]. -enum Command +enum Command where - C: ChainApi, + ChainApi: graph::ChainApi, { /// Adds a new stream of dropped-related events originating in a view with a specific block /// hash - AddView(BlockHash, ViewStream), + AddView(BlockHash, ViewStream), /// Removes an existing view's stream associated with a specific block hash. - RemoveView(BlockHash), - /// Removes internal states for given extrinsic hashes. + RemoveView(BlockHash), + /// Removes referencing views for given extrinsic hashes. /// /// Intended to ba called on finalization. - RemoveFinalizedTxs(Vec>), + RemoveFinalizedTxs(Vec>), } -impl Debug for Command +impl Debug for Command where - C: ChainApi, + ChainApi: graph::ChainApi, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { @@ -92,30 +125,114 @@ where /// /// This struct maintains a mapping of active views and their corresponding streams, as well as the /// state of each transaction with respect to these views. -struct MultiViewDropWatcherContext +struct MultiViewDropWatcherContext where - C: ChainApi, + ChainApi: graph::ChainApi, { /// A map that associates the views identified by corresponding block hashes with their streams /// of dropped-related events. This map is used to keep track of active views and their event /// streams. - stream_map: StreamMap, ViewStream>, + stream_map: StreamMap, ViewStream>, /// A receiver for commands to control the state of the stream, allowing the addition and /// removal of views. This is used to dynamically update which views are being tracked. - command_receiver: CommandReceiver>, - + command_receiver: CommandReceiver>, /// For each transaction hash we keep the set of hashes representing the views that see this - /// transaction as ready or future. + /// transaction as ready or in_block. + /// + /// Even if all views referencing a ready transactions are removed, we still want to keep + /// transaction, there can be a fork which sees the transaction as ready. /// /// Once transaction is dropped, dropping view is removed from the set. - transaction_states: HashMap, HashSet>>, + ready_transaction_views: HashMap, HashSet>>, + /// For each transaction hash we keep the set of hashes representing the views that see this + /// transaction as future. + /// + /// Once all views referencing a future transactions are removed, the future can be dropped. + /// + /// Once transaction is dropped, dropping view is removed from the set. + future_transaction_views: HashMap, HashSet>>, + + /// Transactions that need to be notified as dropped. + pending_dropped_transactions: Vec>, } impl MultiViewDropWatcherContext where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + C: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { + /// Provides the ready or future `HashSet` containing views referencing given transaction. 
+	fn transaction_views(
+		&mut self,
+		tx_hash: ExtrinsicHash<C>,
+	) -> Option<OccupiedEntry<ExtrinsicHash<C>, HashSet<BlockHash<C>>>> {
+		if let Entry::Occupied(views_keeping_tx_valid) = self.ready_transaction_views.entry(tx_hash)
+		{
+			return Some(views_keeping_tx_valid)
+		}
+		if let Entry::Occupied(views_keeping_tx_valid) =
+			self.future_transaction_views.entry(tx_hash)
+		{
+			return Some(views_keeping_tx_valid)
+		}
+		None
+	}
+
+	/// Processes the command and updates internal state accordingly.
+	fn handle_command(&mut self, cmd: Command<C>) {
+		match cmd {
+			Command::AddView(key, stream) => {
+				trace!(
+					target: LOG_TARGET,
+					"dropped_watcher: Command::AddView {key:?} views:{:?}",
+					self.stream_map.keys().collect::<Vec<_>>()
+				);
+				self.stream_map.insert(key, stream);
+			},
+			Command::RemoveView(key) => {
+				trace!(
+					target: LOG_TARGET,
+					"dropped_watcher: Command::RemoveView {key:?} views:{:?}",
+					self.stream_map.keys().collect::<Vec<_>>()
+				);
+				self.stream_map.remove(&key);
+				self.ready_transaction_views.iter_mut().for_each(|(tx_hash, views)| {
+					trace!(
+						target: LOG_TARGET,
+						"[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}",
+						tx_hash,
+						views
+					);
+					views.remove(&key);
+				});
+
+				self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| {
+					trace!(
+						target: LOG_TARGET,
+						"[{:?}] dropped_watcher: Command::RemoveView future views: {:?}",
+						tx_hash,
+						views
+					);
+					views.remove(&key);
+					if views.is_empty() {
+						self.pending_dropped_transactions.push(*tx_hash);
+					}
+				});
+			},
+			Command::RemoveFinalizedTxs(xts) => {
+				log_xt_trace!(
+					target: LOG_TARGET,
+					xts.clone(),
+					"[{:?}] dropped_watcher: finalized xt removed"
+				);
+				xts.iter().for_each(|xt| {
+					self.ready_transaction_views.remove(xt);
+					self.future_transaction_views.remove(xt);
+				});
+			},
+		}
+	}
+
 	/// Processes a `ViewStreamEvent` from a specific view and updates the internal state
 	/// accordingly.
 	///
@@ -125,41 +242,69 @@
 		&mut self,
 		block_hash: BlockHash<C>,
 		event: ViewStreamEvent<C>,
-	) -> Option<ExtrinsicHash<C>> {
+	) -> Option<DroppedTransaction<ExtrinsicHash<C>>> {
 		trace!(
 			target: LOG_TARGET,
-			"dropped_watcher: handle_event: event:{:?} views:{:?}, ",
-			event,
+			"dropped_watcher: handle_event: event:{event:?} from:{block_hash:?} future_views:{:?} ready_views:{:?} stream_map views:{:?}, ",
+			self.future_transaction_views.get(&event.0),
+			self.ready_transaction_views.get(&event.0),
 			self.stream_map.keys().collect::<Vec<_>>(),
 		);
 		let (tx_hash, status) = event;
 		match status {
-			TransactionStatus::Ready | TransactionStatus::Future => {
-				self.transaction_states.entry(tx_hash).or_default().insert(block_hash);
+			TransactionStatus::Future => {
+				self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash);
+			},
+			TransactionStatus::Ready | TransactionStatus::InBlock(..) => {
+				// note: if a future transaction was once seen as ready, we may want to treat it
+				// as a ready transaction. Unreferenced future transactions are more likely to be
+				// removed when the last referencing view is removed than ready transactions.
+				// A transaction seen as ready is likely quite close to being included in some
+				// future fork.
+ if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { + views.insert(block_hash); + self.ready_transaction_views.insert(tx_hash, views); + } else { + self.ready_transaction_views.entry(tx_hash).or_default().insert(block_hash); + } }, - TransactionStatus::Dropped | TransactionStatus::Usurped(_) => { - if let Entry::Occupied(mut views_keeping_tx_valid) = - self.transaction_states.entry(tx_hash) - { + TransactionStatus::Dropped => { + if let Some(mut views_keeping_tx_valid) = self.transaction_views(tx_hash) { views_keeping_tx_valid.get_mut().remove(&block_hash); - if views_keeping_tx_valid.get().is_empty() || - views_keeping_tx_valid - .get() - .iter() - .all(|h| !self.stream_map.contains_key(h)) - { - return Some(tx_hash) + if views_keeping_tx_valid.get().is_empty() { + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } } else { debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); - return Some(tx_hash) + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } }, + TransactionStatus::Usurped(by) => + return Some(DroppedTransaction::new_usurped(tx_hash, by)), _ => {}, }; None } + /// Gets pending dropped transactions if any. + fn get_pending_dropped_transaction(&mut self) -> Option>> { + while let Some(tx_hash) = self.pending_dropped_transactions.pop() { + // never drop transaction that was seen as ready. It may not have a referencing + // view now, but such fork can appear. + if self.ready_transaction_views.get(&tx_hash).is_some() { + continue + } + + if let Some(views) = self.future_transaction_views.get(&tx_hash) { + if views.is_empty() { + self.future_transaction_views.remove(&tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } + } + None + } + /// Creates a new `StreamOfDropped` and its associated event stream controller. /// /// This method initializes the internal structures and unfolds the stream of dropped @@ -176,42 +321,29 @@ where let ctx = Self { stream_map: StreamMap::new(), command_receiver, - transaction_states: Default::default(), + ready_transaction_views: Default::default(), + future_transaction_views: Default::default(), + pending_dropped_transactions: Default::default(), }; let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { loop { + if let Some(dropped) = ctx.get_pending_dropped_transaction() { + debug!("dropped_watcher: sending out (pending): {dropped:?}"); + return Some((dropped, ctx)); + } tokio::select! { biased; - cmd = ctx.command_receiver.next() => { - match cmd? 
{ - Command::AddView(key,stream) => { - trace!(target: LOG_TARGET,"dropped_watcher: Command::AddView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); - ctx.stream_map.insert(key,stream); - }, - Command::RemoveView(key) => { - trace!(target: LOG_TARGET,"dropped_watcher: Command::RemoveView {key:?} views:{:?}",ctx.stream_map.keys().collect::>()); - ctx.stream_map.remove(&key); - ctx.transaction_states.iter_mut().for_each(|(_,state)| { - state.remove(&key); - }); - }, - Command::RemoveFinalizedTxs(xts) => { - log_xt_trace!(target: LOG_TARGET, xts.clone(), "[{:?}] dropped_watcher: finalized xt removed"); - xts.iter().for_each(|xt| { - ctx.transaction_states.remove(xt); - }); - - }, - } - }, - Some(event) = next_event(&mut ctx.stream_map) => { if let Some(dropped) = ctx.handle_event(event.0, event.1) { debug!("dropped_watcher: sending out: {dropped:?}"); return Some((dropped, ctx)); } + }, + cmd = ctx.command_receiver.next() => { + ctx.handle_command(cmd?); } + } } }) @@ -225,30 +357,30 @@ where /// /// This struct provides methods to add and remove streams associated with views to and from the /// stream. -pub struct MultiViewDroppedWatcherController { +pub struct MultiViewDroppedWatcherController { /// A controller allowing to update the state of the associated [`StreamOfDropped`]. - controller: Controller>, + controller: Controller>, } -impl Clone for MultiViewDroppedWatcherController { +impl Clone for MultiViewDroppedWatcherController { fn clone(&self) -> Self { Self { controller: self.controller.clone() } } } -impl MultiViewDroppedWatcherController +impl MultiViewDroppedWatcherController where - C: ChainApi + 'static, - <::Block as BlockT>::Hash: Unpin, + ChainApi: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, { /// Creates new [`StreamOfDropped`] and its controller. - pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { - let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); + pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { + let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); (Self { controller: ctrl }, stream_map.boxed()) } /// Notifies the [`StreamOfDropped`] that new view was created. - pub fn add_view(&self, key: BlockHash, view: ViewStream) { + pub fn add_view(&self, key: BlockHash, view: ViewStream) { let _ = self.controller.unbounded_send(Command::AddView(key, view)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: add_view {key:?} send message failed: {e}"); }); @@ -256,14 +388,17 @@ where /// Notifies the [`StreamOfDropped`] that the view was destroyed and shall be removed the /// stream map. - pub fn remove_view(&self, key: BlockHash) { + pub fn remove_view(&self, key: BlockHash) { let _ = self.controller.unbounded_send(Command::RemoveView(key)).map_err(|e| { trace!(target: LOG_TARGET, "dropped_watcher: remove_view {key:?} send message failed: {e}"); }); } /// Removes status info for finalized transactions. 
- pub fn remove_finalized_txs(&self, xts: impl IntoIterator> + Clone) { + pub fn remove_finalized_txs( + &self, + xts: impl IntoIterator> + Clone, + ) { let _ = self .controller .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) @@ -298,7 +433,7 @@ mod dropped_watcher_tests { watcher.add_view(block_hash, view_stream); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -348,7 +483,10 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash1]); + assert_eq!( + handle.await.unwrap(), + vec![DroppedTransaction::new_enforced_by_limts(tx_hash1)] + ); } #[tokio::test] @@ -373,10 +511,11 @@ mod dropped_watcher_tests { watcher.add_view(block_hash0, view_stream0); assert!(output_stream.next().now_or_never().is_none()); + watcher.remove_view(block_hash0); watcher.add_view(block_hash1, view_stream1); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } #[tokio::test] @@ -419,6 +558,6 @@ mod dropped_watcher_tests { let block_hash2 = H256::repeat_byte(0x03); watcher.add_view(block_hash2, view_stream2); let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); - assert_eq!(handle.await.unwrap(), vec![tx_hash]); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 065d0cb3a274..4ec87f1fefa4 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -23,7 +23,7 @@ use super::{ import_notification_sink::MultiViewImportNotificationSink, metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, - tx_mem_pool::{TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + tx_mem_pool::{InsertionInfo, TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, view::View, view_store::ViewStore, }; @@ -31,8 +31,12 @@ use crate::{ api::FullChainApi, common::log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, - fork_aware_txpool::revalidation_worker, - graph::{self, base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, IsValidator, Options}, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, IsValidator, Options, + }, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -197,9 +201,14 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -216,8 +225,8 @@ where ( Self { mempool, - 
api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -233,14 +242,17 @@ where ) } - /// Monitors the stream of dropped transactions and removes them from the mempool. + /// Monitors the stream of dropped transactions and removes them from the mempool and + /// view_store. /// /// This asynchronous task continuously listens for dropped transaction notifications provided /// within `dropped_stream` and ensures that these transactions are removed from the `mempool` - /// and `import_notification_sink` instances. + /// and `import_notification_sink` instances. For Usurped events, the transaction is also + /// removed from the view_store. async fn dropped_monitor_task( mut dropped_stream: StreamOfDropped, mempool: Arc>, + view_store: Arc>, import_notification_sink: MultiViewImportNotificationSink< Block::Hash, ExtrinsicHash, @@ -251,9 +263,33 @@ where log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; - log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification, removing", dropped); - mempool.remove_dropped_transactions(&[dropped]).await; - import_notification_sink.clean_notified_items(&[dropped]); + let dropped_tx_hash = dropped.tx_hash; + log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); + match dropped.reason { + DroppedReason::Usurped(new_tx_hash) => { + if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { + view_store + .replace_transaction( + new_tx.source(), + new_tx.tx(), + dropped_tx_hash, + new_tx.is_watched(), + ) + .await; + } else { + log::trace!( + target:LOG_TARGET, + "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", + new_tx_hash, + ); + } + }, + DroppedReason::LimitsEnforced => {}, + }; + + mempool.remove_dropped_transaction(&dropped_tx_hash).await; + view_store.listener.transaction_dropped(dropped); + import_notification_sink.clean_notified_items(&[dropped_tx_hash]); } } @@ -288,9 +324,13 @@ where let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), + view_store.clone(), import_notification_sink.clone(), ); @@ -306,8 +346,8 @@ where Self { mempool, - api: pool_api.clone(), - view_store: Arc::new(ViewStore::new(pool_api, listener, dropped_stream_controller)), + api: pool_api, + view_store, ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), enactment_state: Arc::new(Mutex::new(EnactmentState::new( best_block_hash, @@ -366,6 +406,16 @@ where self.mempool.unwatched_and_watched_count() } + /// Returns a set of future transactions for given block hash. + /// + /// Intended for logging / tests. + pub fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.view_store.futures_at(at) + } + /// Returns a best-effort set of ready transactions for a given block, without executing full /// maintain process. 
/// @@ -600,31 +650,33 @@ where let mempool_results = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { - return Ok(mempool_results) + return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) } let to_be_submitted = mempool_results .iter() .zip(xts) - .filter_map(|(result, xt)| result.as_ref().ok().map(|_| xt)) + .filter_map(|(result, xt)| { + result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) + }) .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); let mempool = self.mempool.clone(); - let results_map = view_store.submit(source, to_be_submitted.into_iter()).await; + let results_map = view_store.submit(to_be_submitted.into_iter()).await; let mut submission_results = reduce_multiview_result(results_map).into_iter(); Ok(mempool_results .into_iter() .map(|result| { - result.and_then(|xt_hash| { + result.and_then(|insertion| { submission_results .next() .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") .inspect_err(|_| - mempool.remove(xt_hash) + mempool.remove(insertion.hash) ) }) }) @@ -660,19 +712,18 @@ where ) -> Result>>, Self::Error> { log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); let xt = Arc::from(xt); - let xt_hash = match self.mempool.push_watched(source, xt.clone()) { - Ok(xt_hash) => xt_hash, - Err(e) => return Err(e), - }; + let InsertionInfo { hash: xt_hash, source: timed_source } = + match self.mempool.push_watched(source, xt.clone()) { + Ok(result) => result, + Err(e) => return Err(e), + }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - let view_store = self.view_store.clone(); - let mempool = self.mempool.clone(); - view_store - .submit_and_watch(at, source, xt) + self.view_store + .submit_and_watch(at, timed_source, xt) .await - .inspect_err(|_| mempool.remove(xt_hash)) + .inspect_err(|_| self.mempool.remove(xt_hash)) } /// Intended to remove transactions identified by the given hashes, and any dependent @@ -801,12 +852,12 @@ where ) -> Result { log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); let xt = Arc::from(xt); - let result = self + let InsertionInfo { hash: xt_hash, .. } = self .mempool .extend_unwatched(TransactionSource::Local, &[xt.clone()]) .remove(0)?; - self.view_store.submit_local(xt).or_else(|_| Ok(result)) + self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) } } @@ -914,6 +965,9 @@ where let start = Instant::now(); let watched_xts = self.register_listeners(&mut view).await; let duration = start.elapsed(); + // sync the transactions statuses and referencing views in all the listeners with newly + // cloned view. + view.pool.validated_pool().retrigger_notifications(); log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); // 2. Handle transactions from the tree route. 
Pruning transactions from the view first @@ -1041,58 +1095,35 @@ where self.active_views_count() ); let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; - let xts = self.mempool.clone_unwatched(); - - let mut all_submitted_count = 0; - if !xts.is_empty() { - let unwatched_count = xts.len(); - let mut buckets = HashMap::>>::default(); - xts.into_iter() - .filter(|(hash, _)| !view.pool.validated_pool().pool.read().is_imported(hash)) - .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(_, tx)| (tx.source(), tx.tx())) - .for_each(|(source, tx)| buckets.entry(source).or_default().push(tx)); - - for (source, xts) in buckets { - all_submitted_count += xts.len(); - let _ = view.submit_many(source, xts).await; - } - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} unwatched {}/{}", view.at.hash, all_submitted_count, unwatched_count); - } - - let watched_submitted_count = watched_xts.len(); - let mut buckets = HashMap::< - TransactionSource, - Vec<(ExtrinsicHash, ExtrinsicFor)>, - >::default(); - watched_xts + let (hashes, xts_filtered): (Vec<_>, Vec<_>) = watched_xts .into_iter() + .chain(self.mempool.clone_unwatched().into_iter()) + .filter(|(hash, _)| !view.is_imported(hash)) .filter(|(hash, _)| !included_xts.contains(&hash)) - .map(|(tx_hash, tx)| (tx.source(), tx_hash, tx.tx())) - .for_each(|(source, tx_hash, tx)| { - buckets.entry(source).or_default().push((tx_hash, tx)) - }); + .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) + .unzip(); - let mut watched_results = Vec::default(); - for (source, watched_xts) in buckets { - let hashes = watched_xts.iter().map(|i| i.0).collect::>(); - let results = view - .submit_many(source, watched_xts.into_iter().map(|i| i.1)) - .await - .into_iter() - .zip(hashes) - .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) - .collect::>(); - watched_results.extend(results); - } + let watched_results = view + .submit_many(xts_filtered) + .await + .into_iter() + .zip(hashes) + .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .collect::>(); + + let submitted_count = watched_results.len(); - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {:?} watched {}/{}", view.at.hash, watched_submitted_count, self.mempool_len().1); + log::debug!( + target: LOG_TARGET, + "update_view_with_mempool: at {:?} submitted {}/{}", + view.at.hash, + submitted_count, + self.mempool.len() + ); - all_submitted_count += watched_submitted_count; - let _ = all_submitted_count - .try_into() - .map(|v| self.metrics.report(|metrics| metrics.submitted_from_mempool_txs.inc_by(v))); + self.metrics + .report(|metrics| metrics.submitted_from_mempool_txs.inc_by(submitted_count as _)); // if there are no views yet, and a single newly created view is reporting error, just send // out the invalid event, and remove transaction. @@ -1176,7 +1207,14 @@ where }) .map(|(tx_hash, tx)| { //find arc if tx is known - self.mempool.get_by_hash(tx_hash).unwrap_or_else(|| Arc::from(tx)) + self.mempool + .get_by_hash(tx_hash) + .map(|tx| (tx.source(), tx.tx())) + .unwrap_or_else(|| { + // These transactions are coming from retracted blocks, we + // should simply consider them external. + (TimedTransactionSource::new_external(true), Arc::from(tx)) + }) }), ); @@ -1185,16 +1223,7 @@ where }); } - let _ = view - .pool - .resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. 
- TransactionSource::External, - resubmit_transactions, - ) - .await; + let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs index 7fbdcade63b8..f9a41673bb8f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -326,6 +326,7 @@ mod tests { let j0 = tokio::spawn(runnable); let stream = ctrl.event_stream(); + let stream2 = ctrl.event_stream(); let mut v1 = View::new(vec![(10, 1), (10, 2), (10, 3)]); let mut v2 = View::new(vec![(20, 1), (20, 2), (20, 6)]); @@ -342,20 +343,16 @@ mod tests { ctrl.add_view(1000, o1); ctrl.add_view(2000, o2); - let j4 = { - let ctrl = ctrl.clone(); - tokio::spawn(async move { - tokio::time::sleep(Duration::from_millis(70)).await; - ctrl.clean_notified_items(&vec![1, 3]); - ctrl.add_view(3000, o3.boxed()); - }) - }; + let out = stream.take(4).collect::>().await; + assert_eq!(out, vec![1, 2, 3, 6]); - let out = stream.take(6).collect::>().await; + ctrl.clean_notified_items(&vec![1, 3]); + ctrl.add_view(3000, o3.boxed()); + let out = stream2.take(6).collect::>().await; assert_eq!(out, vec![1, 2, 3, 6, 1, 3]); - drop(ctrl); - futures::future::join_all(vec![j0, j1, j2, j3, j4]).await; + drop(ctrl); + futures::future::join_all(vec![j0, j1, j2, j3]).await; } #[tokio::test] diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 8d0e69db2e9a..a00234a99808 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -36,6 +36,8 @@ use std::{ }; use tokio_stream::StreamMap; +use super::dropped_watcher::{DroppedReason, DroppedTransaction}; + /// A side channel allowing to control the external stream instance (one per transaction) with /// [`ControllerCommand`]. /// @@ -79,7 +81,7 @@ enum ControllerCommand { /// Notifies that a transaction was dropped from the pool. /// /// If all preconditions are met, an external dropped event will be sent out. - TransactionDropped, + TransactionDropped(DroppedReason>), } impl std::fmt::Debug for ControllerCommand @@ -99,8 +101,8 @@ where ControllerCommand::TransactionBroadcasted(_) => { write!(f, "ListenerAction::TransactionBroadcasted(...)") }, - ControllerCommand::TransactionDropped => { - write!(f, "ListenerAction::TransactionDropped") + ControllerCommand::TransactionDropped(r) => { + write!(f, "ListenerAction::TransactionDropped {r:?}") }, } } @@ -268,6 +270,7 @@ where /// stream map. fn remove_view(&mut self, block_hash: BlockHash) { self.status_stream_map.remove(&block_hash); + self.views_keeping_tx_valid.remove(&block_hash); trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); } } @@ -282,6 +285,11 @@ where Self { controllers: Default::default() } } + /// Returns `true` if the listener contains a stream controller for the specified hash. + pub fn contains_tx(&self, tx_hash: &ExtrinsicHash) -> bool { + self.controllers.read().contains_key(tx_hash) + } + /// Creates an external aggregated stream of events for given transaction. 
/// /// This method initializes an `ExternalWatcherContext` for the provided transaction hash, sets @@ -346,11 +354,16 @@ where log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); return Some((TransactionStatus::Broadcast(peers), ctx)) }, - ControllerCommand::TransactionDropped => { + ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); ctx.terminate = true; return Some((TransactionStatus::Dropped, ctx)) }, + ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); + ctx.terminate = true; + return Some((TransactionStatus::Usurped(by), ctx)) + }, } }, }; @@ -445,16 +458,15 @@ where /// /// This method sends a `TransactionDropped` command to the controller of each requested /// transaction prompting and external `Broadcasted` event. - pub(crate) fn transactions_dropped(&self, dropped: &[ExtrinsicHash]) { + pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { let mut controllers = self.controllers.write(); - debug!(target: LOG_TARGET, "mvl::transactions_dropped: {:?}", dropped); - for tx_hash in dropped { - if let Some(tx) = controllers.remove(&tx_hash) { - debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped) { - trace!(target: LOG_TARGET, "[{:?}] transactions_dropped: send message failed: {:?}", tx_hash, e); - }; - } + debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); + if let Some(tx) = controllers.remove(&dropped.tx_hash) { + let DroppedTransaction { tx_hash, reason } = dropped; + debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { + trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); + }; } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index 9464ab3f5766..eb898c35a134 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -186,9 +186,9 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, fork_aware_txpool::view::FinishRevalidationLocalChannels, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::Alice; #[test] @@ -212,9 +212,10 @@ mod tests { nonce: 0, }); - let _ = block_on( - view.submit_many(TransactionSource::External, std::iter::once(uxt.clone().into())), - ); + let _ = block_on(view.submit_many(std::iter::once(( + TimedTransactionSource::new_external(false), + uxt.clone().into(), + )))); assert_eq!(api.validation_requests().len(), 1); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 86c07008c3f3..7b824d4653c2 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ 
-30,7 +30,7 @@ use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::Mult use crate::{ common::log_xt::log_xt_trace, graph, - graph::{tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, LOG_TARGET, }; use futures::FutureExt; @@ -74,7 +74,7 @@ where /// Size of the extrinsics actual body. bytes: usize, /// Transaction source. - source: TransactionSource, + source: TimedTransactionSource, /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. validated_at: AtomicU64, //todo: we need to add future / ready status at finalized block. @@ -95,18 +95,30 @@ where /// Shall the progress of transaction be watched. /// /// Was transaction sent with `submit_and_watch`. - fn is_watched(&self) -> bool { + pub(crate) fn is_watched(&self) -> bool { self.watched } /// Creates a new instance of wrapper for unwatched transaction. fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: false, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: false, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Creates a new instance of wrapper for watched transaction. fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { - Self { watched: true, tx, source, validated_at: AtomicU64::new(0), bytes } + Self { + watched: true, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } } /// Provides a clone of actual transaction body. @@ -117,8 +129,8 @@ where } /// Returns the source of the transaction. - pub(crate) fn source(&self) -> TransactionSource { - self.source + pub(crate) fn source(&self) -> TimedTransactionSource { + self.source.clone() } } @@ -174,6 +186,19 @@ where max_transactions_total_bytes: usize, } +/// Helper structure to encapsulate a result of [`TxMemPool::try_insert`]. +#[derive(Debug)] +pub(super) struct InsertionInfo { + pub(super) hash: Hash, + pub(super) source: TimedTransactionSource, +} + +impl InsertionInfo { + fn new(hash: Hash, source: TimedTransactionSource) -> Self { + Self { hash, source } + } +} + impl TxMemPool where Block: BlockT, @@ -220,8 +245,8 @@ where pub(super) fn get_by_hash( &self, hash: ExtrinsicHash, - ) -> Option> { - self.transactions.read().get(&hash).map(|t| t.tx()) + ) -> Option>> { + self.transactions.read().get(&hash).map(Clone::clone) } /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. @@ -231,6 +256,11 @@ where (transactions.len() - watched_count, watched_count) } + /// Returns a total number of transactions kept within mempool. + pub fn len(&self) -> usize { + self.transactions.read().len() + } + /// Returns the number of bytes used by all extrinsics in the the pool. 
#[cfg(test)] pub fn bytes(&self) -> usize { @@ -249,7 +279,7 @@ where &self, hash: ExtrinsicHash, tx: TxInMemPool, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let bytes = self.transactions.bytes(); let mut transactions = self.transactions.write(); let result = match ( @@ -257,14 +287,15 @@ where transactions.contains_key(&hash), ) { (true, false) => { + let source = tx.source(); transactions.insert(hash, Arc::from(tx)); - Ok(hash) + Ok(InsertionInfo::new(hash, source)) }, (_, true) => Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), }; - log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result); + log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); result } @@ -277,7 +308,7 @@ where &self, source: TransactionSource, xts: &[ExtrinsicFor], - ) -> Vec, ChainApi::Error>> { + ) -> Vec>, ChainApi::Error>> { let result = xts .iter() .map(|xt| { @@ -294,25 +325,18 @@ where &self, source: TransactionSource, xt: ExtrinsicFor, - ) -> Result, ChainApi::Error> { + ) -> Result>, ChainApi::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) } - /// Removes transactions from the memory pool which are specified by the given list of hashes - /// and send the `Dropped` event to the listeners of these transactions. - pub(super) async fn remove_dropped_transactions( + /// Removes transaction from the memory pool which are specified by the given list of hashes. + pub(super) async fn remove_dropped_transaction( &self, - to_be_removed: &[ExtrinsicHash], - ) { - log::debug!(target: LOG_TARGET, "remove_dropped_transactions count:{:?}", to_be_removed.len()); - log_xt_trace!(target: LOG_TARGET, to_be_removed, "[{:?}] mempool::remove_dropped_transactions"); - let mut transactions = self.transactions.write(); - to_be_removed.iter().for_each(|t| { - transactions.remove(t); - }); - - self.listener.transactions_dropped(to_be_removed); + dropped: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); + self.transactions.write().remove(dropped) } /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory @@ -369,13 +393,13 @@ where }; let validations_futures = input.into_iter().map(|(xt_hash, xt)| { - self.api.validate_transaction(finalized_block.hash, xt.source, xt.tx()).map( - move |validation_result| { + self.api + .validate_transaction(finalized_block.hash, xt.source.clone().into(), xt.tx()) + .map(move |validation_result| { xt.validated_at .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); (xt_hash, validation_result) - }, - ) + }) }); let validation_results = futures::future::join_all(validations_futures).await; let input_len = validation_results.len(); @@ -403,7 +427,7 @@ where log::debug!( target: LOG_TARGET, - "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} purged:{} took {duration:?}", invalid_hashes.len(), + "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), ); invalid_hashes diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 99095d88cb0a..3cbb8fa4871d 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -27,13 +27,13 @@ use super::metrics::MetricsLink as PrometheusMetrics; use crate::{ common::log_xt::log_xt_trace, graph::{ - self, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, IsValidator, ValidatedTransaction, - ValidatedTransactionFor, + self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, + IsValidator, ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; use parking_lot::Mutex; -use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus}; use sp_blockchain::HashAndNumber; use sp_runtime::{ generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionValidityError, @@ -157,22 +157,21 @@ where /// Imports many unvalidated extrinsics into the view. pub(super) async fn submit_many( &self, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, ChainApi::Error>> { if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { let xts = xts.into_iter().collect::>(); - log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); - self.pool.submit_at(&self.at, source, xts).await + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + self.pool.submit_at(&self.at, xts).await } else { - self.pool.submit_at(&self.at, source, xts).await + self.pool.submit_at(&self.at, xts).await } } /// Import a single extrinsic and starts to watch its progress in the view. pub(super) async fn submit_and_watch( &self, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, ChainApi::Error> { log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); @@ -193,7 +192,7 @@ where .api() .validate_transaction_blocking( self.at.hash, - TransactionSource::Local, + sc_transaction_pool_api::TransactionSource::Local, Arc::from(xt.clone()), )? .map_err(|e| { @@ -214,7 +213,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(true), Arc::from(xt), length, validity, @@ -285,7 +284,7 @@ where } _ = async { if let Some(tx) = batch_iter.next() { - let validation_result = (api.validate_transaction(self.at.hash, tx.source, tx.data.clone()).await, tx.hash, tx); + let validation_result = (api.validate_transaction(self.at.hash, tx.source.clone().into(), tx.data.clone()).await, tx.hash, tx); validation_results.push(validation_result); } else { self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); @@ -324,7 +323,7 @@ where ValidatedTransaction::valid_at( self.at.number.saturated_into::(), tx_hash, - tx.source, + tx.source.clone(), tx.data.clone(), api.hash_and_length(&tx.data).1, validity, @@ -455,4 +454,10 @@ where ); } } + + /// Returns true if the transaction with given hash is already imported into the view. 
+ pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash<ChainApi>) -> bool { + const IGNORE_BANNED: bool = false; + self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() + } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index f23dcedd5bfd..a06c051f0a7e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -24,17 +24,51 @@ use super::{ }; use crate::{ fork_aware_txpool::dropped_watcher::MultiViewDroppedWatcherController, - graph, - graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash, TransactionFor}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, TransactionFor, + }, ReadyIteratorFor, LOG_TARGET, }; use futures::prelude::*; use itertools::Itertools; use parking_lot::RwLock; -use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; use sp_blockchain::TreeRoute; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, + time::Instant, +}; + +/// Helper struct to keep the context for transaction replacements. +#[derive(Clone)] +struct PendingTxReplacement<ChainApi> +where + ChainApi: graph::ChainApi, +{ + /// Indicates if the new transaction was already submitted to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, + /// New transaction replacing the old one. + xt: ExtrinsicFor<ChainApi>, + /// Source of the transaction. + source: TimedTransactionSource, + /// Indicates if the transaction is watched. + watched: bool, +} + +impl<ChainApi> PendingTxReplacement<ChainApi> +where + ChainApi: graph::ChainApi, +{ + /// Creates a new unprocessed instance of a pending transaction replacement. + fn new(xt: ExtrinsicFor<ChainApi>, source: TimedTransactionSource, watched: bool) -> Self { + Self { processed: false, xt, source, watched } + } +} /// The helper structure encapsulates all the views. pub(super) struct ViewStore<ChainApi, Block> where @@ -62,6 +96,13 @@ where pub(super) most_recent_view: RwLock<Option<Block::Hash>>, /// The controller of multi view dropped stream. pub(super) dropped_stream_controller: MultiViewDroppedWatcherController<ChainApi>, + /// The map used to synchronize replacement of transactions between maintain and dropped + /// notification threads. It is meant to ensure that a replaced transaction is also removed from + /// newly built views in the maintain process. + /// + /// The map's key is the hash of the replaced extrinsic. + pending_txs_replacements: + RwLock<HashMap<ExtrinsicHash<ChainApi>, PendingTxReplacement<ChainApi>>>, } impl<ChainApi, Block> ViewStore<ChainApi, Block> where @@ -83,14 +124,14 @@ where listener, most_recent_view: RwLock::from(None), dropped_stream_controller, + pending_txs_replacements: Default::default(), } } /// Imports a bunch of unverified extrinsics to every active view.
pub(super) async fn submit( &self, - source: TransactionSource, - xts: impl IntoIterator<Item = ExtrinsicFor<ChainApi>> + Clone, + xts: impl IntoIterator<Item = (TimedTransactionSource, ExtrinsicFor<ChainApi>)> + Clone, ) -> HashMap<Block::Hash, Vec<Result<ExtrinsicHash<ChainApi>, ChainApi::Error>>> { let submit_futures = { let active_views = self.active_views.read(); @@ -99,7 +140,7 @@ where .map(|(_, view)| { let view = view.clone(); let xts = xts.clone(); - async move { (view.at.hash, view.submit_many(source, xts).await) } + async move { (view.at.hash, view.submit_many(xts).await) } }) .collect::<Vec<_>>() }; @@ -145,7 +186,7 @@ where pub(super) async fn submit_and_watch( &self, _at: Block::Hash, - source: TransactionSource, + source: TimedTransactionSource, xt: ExtrinsicFor<ChainApi>, ) -> Result<TxStatusStream<ChainApi>, ChainApi::Error> { let tx_hash = self.api.hash_and_length(&xt).0; @@ -159,6 +200,7 @@ where .map(|(_, view)| { let view = view.clone(); let xt = xt.clone(); + let source = source.clone(); async move { match view.submit_and_watch(source, xt).await { Ok(watcher) => { @@ -261,12 +303,20 @@ where ) -> Vec<Transaction<ExtrinsicHash<ChainApi>, ExtrinsicFor<ChainApi>>> { self.most_recent_view .read() - .map(|at| self.get_view_at(at, true)) + .map(|at| self.futures_at(at)) .flatten() - .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) .unwrap_or_default() } + /// Returns a list of future transactions in the view at the given block hash. + pub(super) fn futures_at( + &self, + at: Block::Hash, + ) -> Option<Vec<Transaction<ExtrinsicHash<ChainApi>, ExtrinsicFor<ChainApi>>>> { + self.get_view_at(at, true) + .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) + } + /// Collects all the transactions included in the blocks on the provided `tree_route` and /// triggers finalization event for them. /// @@ -329,12 +379,16 @@ where /// - moved to the inactive views set (`inactive_views`), /// - removed from the multi view listeners. /// - /// The `most_recent_view` is update with the reference to the newly inserted view. + /// The `most_recent_view` is updated with the reference to the newly inserted view. + /// + /// If there are any pending tx replacements, they are applied to the new view. pub(super) async fn insert_new_view( &self, view: Arc<View<ChainApi>>, tree_route: &TreeRoute<Block>, ) { + self.apply_pending_tx_replacements(view.clone()).await; + //note: most_recent_view must be synced with changes in in/active_views.
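+ //note: a sketch of the intended ordering, assuming the pending-replacement map described
+ //above: replacements are applied to the new view before it is published in
+ //most_recent_view/active_views below, so a replacement racing with view creation is
+ //replayed from the pending map rather than lost.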
{ let mut most_recent_view_lock = self.most_recent_view.write(); @@ -386,8 +440,10 @@ where let mut removed_views = vec![]; { - self.active_views - .read() + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + + active_views .iter() .filter(|(hash, v)| !match finalized_number { Err(_) | Ok(None) => **hash == finalized_hash, @@ -396,11 +452,8 @@ where }) .map(|(_, v)| removed_views.push(v.at.hash)) .for_each(drop); - } - { - self.inactive_views - .read() + inactive_views .iter() .filter(|(_, v)| !match finalized_number { Err(_) | Ok(None) => false, @@ -438,30 +491,48 @@ where let finalized_xts = self.finalize_route(finalized_hash, tree_route).await; let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + let mut dropped_views = vec![]; //clean up older than finalized { let mut active_views = self.active_views.write(); - active_views.retain(|hash, v| match finalized_number { - Err(_) | Ok(None) => *hash == finalized_hash, - Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, - Ok(Some(n)) => v.at.number > n, + let mut inactive_views = self.inactive_views.write(); + active_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => *hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); - } - { - let mut inactive_views = self.inactive_views.write(); - inactive_views.retain(|_, v| match finalized_number { - Err(_) | Ok(None) => false, - Ok(Some(n)) => v.at.number >= n, + inactive_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, + }; + if !retain { + dropped_views.push(*hash); + } + retain }); log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); } - self.listener.remove_view(finalized_hash); + log::trace!(target:LOG_TARGET,"handle_finalized: dropped_views: {:?}", dropped_views); + self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); + self.listener.remove_view(finalized_hash); + for view in dropped_views { + self.listener.remove_view(view); + self.dropped_stream_controller.remove_view(view); + } + finalized_xts } @@ -484,4 +555,139 @@ where futures::future::join_all(finish_revalidation_futures).await; log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); } + + /// Replaces an existing transaction in the view_store with a new one. + /// + /// Attempts to replace a transaction identified by `replaced` with a new transaction `xt`. + /// + /// Before submitting a transaction to the views, the new *unprocessed* transaction replacement + /// record will be inserted into a pending replacement map. Once the submission to all the views + /// is accomplished, the record is marked as *processed*. + /// + /// This map is later applied in the `insert_new_view` method, executed from a different thread. + /// + /// If the transaction is already being replaced, it will simply return without making + /// changes.
+ pub(super) async fn replace_transaction( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor<ChainApi>, + replaced: ExtrinsicHash<ChainApi>, + watched: bool, + ) { + if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { + entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + } else { + return + }; + + let xt_hash = self.api.hash_and_length(&xt).0; + log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction with {xt_hash:?}, w:{watched}"); + + self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; + + if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { + replacement.processed = true; + } + } + + /// Applies pending transaction replacements to the specified view. + /// + /// After application, all already processed replacements are removed. + async fn apply_pending_tx_replacements(&self, view: Arc<View<ChainApi>>) { + let mut futures = vec![]; + for replacement in self.pending_txs_replacements.read().values() { + let xt_hash = self.api.hash_and_length(&replacement.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + replacement.source.clone(), + replacement.xt.clone(), + xt_hash, + replacement.watched, + )); + } + let _results = futures::future::join_all(futures).await; + self.pending_txs_replacements.write().retain(|_, r| r.processed); + } + + /// Submits `xt` to the given view. + /// + /// For a watched transaction, the stream is added to the listener. + async fn replace_transaction_in_view( + &self, + view: Arc<View<ChainApi>>, + source: TimedTransactionSource, + xt: ExtrinsicFor<ChainApi>, + xt_hash: ExtrinsicHash<ChainApi>, + watched: bool, + ) { + if watched { + match view.submit_and_watch(source, xt).await { + Ok(watcher) => { + self.listener.add_view_watcher_for_tx( + xt_hash, + view.at.hash, + watcher.into_stream().boxed(), + ); + }, + Err(e) => { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit_and_watch to {} failed {}", + xt_hash, view.at.hash, e + ); + }, + } + } else { + if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit to {} failed {}", + xt_hash, view.at.hash, e + ); + } + } + } + + /// Sends `xt` to every view (both active and inactive) containing `replaced` extrinsics. + /// + /// It is assumed that the transaction is already known by the pool. Intended to be called when + /// `xt` is replacing the `replaced` extrinsic.
+ async fn replace_transaction_in_views( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor<ChainApi>, + xt_hash: ExtrinsicHash<ChainApi>, + replaced: ExtrinsicHash<ChainApi>, + watched: bool, + ) { + if watched && !self.listener.contains_tx(&xt_hash) { + log::trace!( + target:LOG_TARGET, + "error: replace_transaction_in_views: no listener for watched transaction {:?}", + xt_hash, + ); + return; + } + + let submit_futures = { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| view.is_imported(&replaced)) + .map(|(_, view)| { + self.replace_transaction_in_view( + view.clone(), + source.clone(), + xt.clone(), + xt_hash, + watched, + ) + }) + .collect::<Vec<_>>() + }; + let _results = futures::future::join_all(submit_futures).await; + } } diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index e4c3a6c425a9..04eaa998f42e 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,7 +20,7 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc}; +use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc, time::Instant}; use crate::LOG_TARGET; use log::{trace, warn}; @@ -30,8 +30,8 @@ use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ traits::Member, transaction_validity::{ - TransactionLongevity as Longevity, TransactionPriority as Priority, - TransactionSource as Source, TransactionTag as Tag, + TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionSource, + TransactionTag as Tag, }, }; @@ -83,6 +83,44 @@ pub struct PruneStatus<Hash, Ex> { pub pruned: Vec<Arc<Transaction<Hash, Ex>>>, } +/// A transaction source that includes a timestamp indicating when the transaction was submitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TimedTransactionSource { + /// The original source of the transaction. + pub source: TransactionSource, + + /// The time at which the transaction was submitted. + pub timestamp: Option<Instant>, +} + +impl From<TimedTransactionSource> for TransactionSource { + fn from(value: TimedTransactionSource) -> Self { + value.source + } +} + +impl TimedTransactionSource { + /// Creates a new instance with an internal `TransactionSource::InBlock` source and an optional + /// timestamp. + pub fn new_in_block(with_timestamp: bool) -> Self { + Self { source: TransactionSource::InBlock, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::External` source and an optional + /// timestamp. + pub fn new_external(with_timestamp: bool) -> Self { + Self { source: TransactionSource::External, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::Local` source and an optional + /// timestamp. + pub fn new_local(with_timestamp: bool) -> Self { + Self { source: TransactionSource::Local, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with a given source and an optional timestamp.
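+ ///
+ /// A minimal usage sketch (assuming the constructors above):
+ /// ```ignore
+ /// let src = TimedTransactionSource::from_transaction_source(TransactionSource::Local, false);
+ /// assert_eq!(src.source, TransactionSource::Local);
+ /// assert!(src.timestamp.is_none()); // passing `true` would capture `Instant::now()`
+ /// ```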
+ pub fn from_transaction_source(source: TransactionSource, with_timestamp: bool) -> Self { + Self { source, timestamp: with_timestamp.then(Instant::now) } + } +} + /// Immutable transaction #[derive(PartialEq, Eq, Clone)] pub struct Transaction<Hash, Extrinsic> { @@ -102,8 +140,8 @@ pub struct Transaction { pub provides: Vec<Tag>, /// Should that transaction be propagated. pub propagate: bool, - /// Source of that transaction. - pub source: Source, + /// Timed source of that transaction. + pub source: TimedTransactionSource, } impl<Hash, Extrinsic> AsRef<Extrinsic> for Transaction<Hash, Extrinsic> { @@ -157,7 +195,7 @@ impl Transaction { bytes: self.bytes, hash: self.hash.clone(), priority: self.priority, - source: self.source, + source: self.source.clone(), valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), @@ -322,22 +360,36 @@ impl BasePool { if !first { - promoted.push(current_hash); + promoted.push(current_hash.clone()); } + // If there were conflicting future transactions promoted, remove them from the + // promoted set. + promoted.retain(|hash| replaced.iter().all(|tx| *hash != tx.hash)); // The transactions were removed from the ready pool. We might attempt to // re-import them. removed.append(&mut replaced); }, + Err(e @ error::Error::TooLowPriority { .. }) => + if first { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + return Err(e) + } else { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + removed.push(current_tx); + promoted.retain(|hash| *hash != current_hash); + }, // transaction failed to be imported. Err(e) => if first { - trace!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); return Err(e) } else { - failed.push(current_hash); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + failed.push(current_tx.hash.clone()); }, } first = false; @@ -434,8 +486,24 @@ impl BasePool None => Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), - other => other, + Some(worst) => Some( + match (worst.transaction.source.timestamp, current.transaction.source.timestamp) + { + (Some(worst_timestamp), Some(current_timestamp)) => { + if worst_timestamp > current_timestamp { + current.clone() + } else { + worst + } + }, + _ => + if worst.imported_at > current.imported_at { + current.clone() + } else { + worst + }, + }, + ), }); if let Some(worst) = worst { @@ -562,7 +630,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: Source::External, + source: TimedTransactionSource::new_external(false), } } @@ -760,6 +828,58 @@ mod tests { ); } + #[test] + fn should_remove_conflicting_future() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![3u8].into(), + hash: 3, + requires: vec![vec![1]], + priority: 50u64, + provides: vec![vec![3]], + ..default_tx().clone() + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let tx2 = Transaction { + data: vec![2u8].into(), + hash: 2, + requires: vec![vec![1]], + provides: vec![vec![3]], + ..default_tx().clone() + }; + pool.import(tx2.clone()).unwrap(); + assert_eq!(pool.future.len(), 2); + + let res = pool + .import(Transaction { + data: vec![1u8].into(), + hash: 1, + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + + assert_eq!( + res, + Imported::Ready { + hash: 1, + promoted: vec![3], + failed: vec![],
+ removed: vec![tx2.into()] + } + ); + + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + + assert_eq!(pool.future.len(), 0); + } + #[test] fn should_handle_a_cycle() { // given @@ -783,14 +903,14 @@ mod tests { assert_eq!(pool.ready.len(), 0); // when - pool.import(Transaction { + let tx2 = Transaction { data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], ..default_tx().clone() - }) - .unwrap(); + }; + pool.import(tx2.clone()).unwrap(); // then { @@ -817,7 +937,12 @@ mod tests { assert_eq!(it.next(), None); assert_eq!( res, - Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![], + removed: vec![tx2.into()] + } ); assert_eq!(pool.future.len(), 0); } @@ -1024,7 +1149,7 @@ mod tests { ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" +source: TimedTransactionSource { source: TransactionSource::External, timestamp: None }, requires: [03, 02], provides: [04], data: [4]}" .to_owned() ); } diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index a5593920eec4..41daf5491f70 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -36,6 +36,7 @@ pub type DroppedByLimitsStream<H, BH> = TracingUnboundedReceiver<(H, TransactionStatus<H, BH>)>; pub struct Listener<H: hash::Hash + Eq, C: ChainApi> { + /// Map containing per-transaction sinks for emitting transaction status events. watchers: HashMap<H, Vec<watcher::Sender<H, BlockHash<C>>>>, finality_watchers: LinkedHashMap<BlockHash<C>, Vec<H>>, @@ -119,32 +120,44 @@ impl Listener<H, C> - pub fn dropped(&mut self, tx: &H, by: Option<&H>, limits_enforced: bool) { + /// Transaction was dropped from the pool because of enforcing the limit. + pub fn limit_enforced(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped (limit enforced)", tx); + self.fire(tx, |watcher| watcher.limit_enforced()); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } + } + + /// Transaction was replaced with another extrinsic. + pub fn usurped(&mut self, tx: &H, by: &H) { trace!(target: LOG_TARGET, "[{:?}] Dropped (replaced with {:?})", tx, by); - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }); - - //note: LimitEnforced could be introduced as new status to get rid of this flag. - if limits_enforced { - if let Some(ref sink) = self.dropped_by_limits_sink { - if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { - trace!(target: LOG_TARGET, "[{:?}] dropped_sink/future: send message failed: {:?}", tx, e); - } + self.fire(tx, |watcher| watcher.usurped(by.clone())); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = + sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(by.clone()))) + { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } } } + /// Transaction was dropped from the pool because of a failure during the resubmission of + /// revalidated transactions or a failure during the pruning of tags.
+ pub fn dropped(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped", tx); + self.fire(tx, |watcher| watcher.dropped()); + } + /// Transaction was removed as invalid. pub fn invalid(&mut self, tx: &H) { trace!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 2dd8de352c6b..23b71ce437b3 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -181,10 +181,8 @@ impl Pool { pub async fn submit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -195,10 +193,8 @@ impl Pool { pub async fn resubmit_at( &self, at: &HashAndNumber, - source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, ) -> Vec, B::Error>> { - let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -207,10 +203,10 @@ impl Pool { pub async fn submit_one( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, source, std::iter::once(xt)).await.pop(); + let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -218,7 +214,7 @@ impl Pool { pub async fn submit_and_watch( &self, at: &HashAndNumber, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { let (_, tx) = self @@ -368,7 +364,7 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. 
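+ // note: `tx.source.clone()` below carries the full `TimedTransactionSource` (including the
+ // submission timestamp) through re-validation; it is reduced to a plain `TransactionSource`
+ // via `.into()` only at the `validate_transaction` call boundary.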
let pruned_transactions = - prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); + prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); let reverified_transactions = self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; @@ -396,7 +392,7 @@ impl Pool { async fn verify( &self, at: &HashAndNumber, - xts: impl IntoIterator)>, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> IndexMap, ValidatedTransactionFor> { let HashAndNumber { number, hash } = *at; @@ -417,7 +413,7 @@ impl Pool { &self, block_hash: ::Hash, block_number: NumberFor, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { @@ -431,7 +427,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source, xt.clone()) + .validate_transaction(block_hash, source.clone().into(), xt.clone()) .await; let status = match validation_result { @@ -488,6 +484,7 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::common::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use base::TimedTransactionSource; use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; @@ -497,7 +494,8 @@ mod tests { use substrate_test_runtime::{AccountId, ExtrinsicBuilder, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; - const SOURCE: TransactionSource = TransactionSource::External; + const SOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; #[test] fn should_validate_and_import_transaction() { @@ -545,8 +543,8 @@ mod tests { let initial_hashes = txs.iter().map(|t| api.hash_and_length(t).0).collect::>(); // when - let txs = txs.into_iter().map(|x| Arc::from(x)).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), SOURCE, txs)); + let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); log::debug!("--> {hashes:#?}"); // then diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 860bcff0bace..9061d0e25581 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -589,7 +589,6 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -601,7 +600,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), } } @@ -711,7 +710,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), }; // when diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 61a26fb4138c..9a2e269b5eed 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -106,7 +106,6 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource; type Hash = 
u64; type Ex = (); @@ -126,7 +125,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), }; (hash, tx) @@ -192,7 +191,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), } } diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index d7f55198a40a..14df63d9673e 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -30,7 +30,7 @@ use serde::Serialize; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::{self, SaturatedConversion}, - transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; @@ -62,7 +62,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, - source: TransactionSource, + source: base::TimedTransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -280,7 +280,7 @@ impl ValidatedPool { // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.dropped(h, None, true); + listener.limit_enforced(h); } removed @@ -453,7 +453,7 @@ impl ValidatedPool { match final_status { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash, None, false), + Status::Dropped => listener.dropped(&hash), Status::Failed => listener.invalid(&hash), } } @@ -492,7 +492,7 @@ impl ValidatedPool { fire_events(&mut *listener, promoted); } for f in &status.failed { - listener.dropped(f, None, false); + listener.dropped(f); } } @@ -671,6 +671,21 @@ impl ValidatedPool { ) -> super::listener::DroppedByLimitsStream, BlockHash> { self.listener.write().create_dropped_by_limits_stream() } + + /// Resends ready and future events for all the ready and future transactions that are already + /// in the pool. + /// + /// Intended to be called after cloning the instance of `ValidatedPool`. + pub fn retrigger_notifications(&self) { + let pool = self.pool.read(); + let mut listener = self.listener.write(); + pool.ready().for_each(|r| { + listener.ready(&r.hash, None); + }); + pool.futures().for_each(|f| { + listener.future(&f.hash); + }); + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) @@ -682,7 +697,7 @@ where base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); failed.iter().for_each(|f| listener.invalid(f)); - removed.iter().for_each(|r| listener.dropped(&r.hash, Some(hash), false)); + removed.iter().for_each(|r| listener.usurped(&r.hash, hash)); promoted.iter().for_each(|p| listener.ready(p, None)); }, base::Imported::Future { ref hash } => listener.future(hash), diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index fb7cf99d4dc6..2fd31e772fd8 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -113,6 +113,12 @@ impl Sender { } /// Transaction has been dropped from the pool because of the limit. 
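+ /// Note: like `dropped` below, this sends `TransactionStatus::Dropped` to the watcher and
+ /// finalizes the stream; the two differ only in the pool-internal reason for the removal.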
+ pub fn limit_enforced(&mut self) { + self.send(TransactionStatus::Dropped); + self.is_finalized = true; + } + + /// Transaction has been dropped from the pool. pub fn dropped(&mut self) { self.send(TransactionStatus::Dropped); self.is_finalized = true; diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 3d3d596c291f..366d91a973d2 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -36,7 +36,10 @@ pub use api::FullChainApi; pub use builder::{Builder, TransactionPoolHandle, TransactionPoolOptions, TransactionPoolType}; pub use common::notification_future; pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; -pub use graph::{base_pool::Limit as PoolLimit, ChainApi, Options, Pool}; +pub use graph::{ + base_pool::{Limit as PoolLimit, TimedTransactionSource}, + ChainApi, Options, Pool, +}; use single_state_txpool::prune_known_txs_for_block; pub use single_state_txpool::{BasicPool, RevalidationType}; pub use transaction_pool_wrapper::TransactionPoolWrapper; diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 5ef726c9f7d3..74031b1e1c72 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -88,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source, ext.data.clone()) + api.validate_transaction(at, ext.source.clone().into(), ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -121,7 +121,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( block_number.saturated_into::(), ext_hash, - ext.source, + ext.source.clone(), ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -375,9 +375,9 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, graph::Pool, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; @@ -398,7 +398,7 @@ mod tests { let uxt_hash = block_on(pool.submit_one( &han_of_block0, - TransactionSource::External, + TimedTransactionSource::new_external(false), uxt.clone().into(), )) .expect("Should be valid"); @@ -433,14 +433,15 @@ mod tests { let han_of_block0 = api.expect_hash_and_number(0); let unknown_block = H256::repeat_byte(0x13); - let uxt_hashes = block_on(pool.submit_at( - &han_of_block0, - TransactionSource::External, - vec![uxt0.into(), uxt1.into()], - )) - .into_iter() - .map(|r| r.expect("Should be valid")) - .collect::>(); + let source = TimedTransactionSource::new_external(false); + let uxt_hashes = + block_on(pool.submit_at( + &han_of_block0, + vec![(source.clone(), uxt0.into()), (source, uxt1.into())], + )) + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index b29630b563bb..e7504012ca67 100644 --- 
a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -29,7 +29,7 @@ use crate::{ error, log_xt::log_xt_trace, }, - graph::{self, ExtrinsicHash, IsValidator}, + graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, ReadyIteratorFor, LOG_TARGET, }; use async_trait::async_trait; @@ -254,14 +254,19 @@ where xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let xts = xts.into_iter().map(Arc::from).collect::>(); + let xts = xts + .into_iter() + .map(|xt| { + (TimedTransactionSource::from_transaction_source(source, false), Arc::from(xt)) + }) + .collect::>(); self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - Ok(pool.submit_at(&at, source, xts).await) + Ok(pool.submit_at(&at, xts).await) } async fn submit_one( @@ -277,7 +282,8 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - pool.submit_one(&at, source, xt).await + pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) + .await } async fn submit_and_watch( @@ -294,7 +300,13 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; - let watcher = pool.submit_and_watch(&at, source, xt).await?; + let watcher = pool + .submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await?; Ok(watcher.into_stream().boxed()) } @@ -458,7 +470,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), hash, - TransactionSource::Local, + TimedTransactionSource::new_local(false), Arc::from(xt), bytes, validity, @@ -662,8 +674,8 @@ where resubmit_transactions.extend( //todo: arctx - we need to get ref from somewhere - block_transactions.into_iter().map(Arc::from).filter(|tx| { - let tx_hash = pool.hash_of(tx); + block_transactions.into_iter().map(Arc::from).filter_map(|tx| { + let tx_hash = pool.hash_of(&tx); let contains = pruned_log.contains(&tx_hash); // need to count all transactions, not just filtered, here @@ -676,8 +688,15 @@ where tx_hash, hash, ); + Some(( + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TimedTransactionSource::new_external(false), + tx, + )) + } else { + None } - !contains }), ); @@ -686,14 +705,7 @@ where }); } - pool.resubmit_at( - &hash_and_number, - // These transactions are coming from retracted blocks, we should - // simply consider them external. 
- TransactionSource::External, - resubmit_transactions, - ) - .await; + pool.resubmit_at(&hash_and_number, resubmit_transactions).await; } let extra_pool = pool.clone(); diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index 9f343a9bd029..c51ca6e17663 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -2267,19 +2267,13 @@ fn fatp_avoid_stuck_transaction() { assert_pool_status!(header06.hash(), &pool, 0, 0); - // Import enough blocks to make xt4i revalidated - let mut prev_header = header03; - // wait 10 blocks for revalidation - for n in 7..=11 { - let header = api.push_block(n, vec![], true); - let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); - block_on(pool.maintain(event)); - prev_header = header; - } + let header07 = api.push_block(7, vec![], true); + let event = finalized_block_event(&pool, header03.hash(), header07.hash()); + block_on(pool.maintain(event)); let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::<Vec<_>>(); log::debug!("xt4i_events: {:#?}", xt4i_events); - assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Invalid]); + assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); assert_eq!(pool.mempool_len(), (0, 0)); } diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index 15f2b7f79c14..aecd83360f1e 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -201,6 +201,20 @@ macro_rules! assert_ready_iterator { }}; } +#[macro_export] +macro_rules! assert_future_iterator { + ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + let futures = $pool.futures_at($hash).unwrap(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); + log::debug!(target:LOG_TARGET, "output: {:#?}", futures); + assert_eq!(expected.len(), futures.len()); + let hsf = futures.iter().map(|a| a.hash).collect::<std::collections::HashSet<_>>(); + let hse = expected.into_iter().collect::<std::collections::HashSet<_>>(); + assert_eq!(hse, hsf); + }}; +} + pub const SOURCE: TransactionSource = TransactionSource::External; #[cfg(test)] diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 03792fd89dfa..afd8183957a8 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -641,3 +641,192 @@ fn fatp_limits_future_size_works() { assert_pool_status!(header01.hash(), &pool, 0, 3); assert_eq!(pool.mempool_len().0, 3); } + +#[test] +fn fatp_limits_watcher_ready_transactions_are_not_dropped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 =
uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 6); + + let header04 = + api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); + api.set_nonce(header04.hash(), Alice.into(), 201); + api.set_nonce(header04.hash(), Bob.into(), 301); + api.set_nonce(header04.hash(), Charlie.into(), 401); + api.set_nonce(header04.hash(), Dave.into(), 501); + api.set_nonce(header04.hash(), Eve.into(), 601); + api.set_nonce(header04.hash(), Ferdie.into(), 701); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); + assert_ready_iterator!(header03.hash(), pool, [xt4, xt5]); + assert_ready_iterator!(header04.hash(), pool, []); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + assert!(!pool.status_all().contains_key(&header01.hash())); + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + assert!(!pool.status_all().contains_key(&header02.hash())); + + //view 01 was dropped + assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); + assert_eq!(pool.mempool_len().1, 6); + + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + + //no revalidation has happened yet, all txs are kept + assert_eq!(pool.mempool_len().1, 6); + + //view 03 is still there + assert!(!pool.status_all().contains_key(&header03.hash())); + + //view 02 was dropped + assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); + + let mut prev_header = header03; + for n in 5..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + //now revalidation has happened, all txs are dropped + assert_eq!(pool.mempool_len().1, 0); +} + +#[test] +fn fatp_limits_watcher_future_transactions_are_dropped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_future_count(2).build();
api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Bob, 301); + let xt2 = uxt(Charlie, 401); + + let xt3 = uxt(Dave, 501); + let xt4 = uxt(Eve, 601); + let xt5 = uxt(Ferdie, 701); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 2); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 4); + assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 6); + assert_future_iterator!(header03.hash(), pool, [xt4, xt5]); + + let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + assert_future_iterator!(header04.hash(), pool, [xt4, xt5]); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 0); + //todo: can we do better? We don't have API to check if event was processed internally. 
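+ // note: the loop below polls `mempool_len()` with a 1 ms sleep as a stand-in for such an
+ // API, giving up after ~20 iterations.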
+ let mut counter = 0; + while pool.mempool_len().1 != 2 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout {}", pool.mempool_len().1); + } + } + assert_eq!(pool.mempool_len().1, 2); + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + + let to_be_checked = vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(2).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + } + + let to_be_checked = vec![xt4_watcher, xt5_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(1).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future]); + } +} diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs new file mode 100644 index 000000000000..41bc374b38f4 --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -0,0 +1,249 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of priorities for fork-aware transaction pool. 
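+//!
+//! A sketch of the scenario exercised below (assuming equal sender and nonce): the
+//! higher-priority transaction wins the slot and the displaced one is reported as usurped.
+//! ```ignore
+//! api.set_priority(&xt0, 2);
+//! api.set_priority(&xt1, 3); // same sender and nonce as xt0
+//! // xt0's watcher observes [Ready, Usurped(<hash of xt1>)]; xt1 stays in the ready set.
+//! ```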
+ +pub mod fatp_common; + +use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_prio_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let result0 = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())); + let result1 = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())); + + log::info!("r0 => {:?}", result0); + log::info!("r1 => {:?}", result1); + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_future_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(3).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(2).collect::<Vec<_>>(); + + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::<Vec<_>>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future, TransactionStatus::Ready]); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::<Vec<_>>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + + assert_eq!(pool.mempool_len().1, 2); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_pool_status!(header01.hash(), &pool, 2, 0); +} + +#[test] +fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 1, 0); + assert_ready_iterator!(header03a.hash(), pool, [xt0]); + assert_pool_status!(header03b.hash(), &pool, 1, 0); + assert_ready_iterator!(header03b.hash(), pool, [xt0]); + assert_ready_iterator!(header01.hash(), pool, [xt0]); + assert_ready_iterator!(header02.hash(), pool, [xt0]); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::<Vec<_>>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::<Vec<_>>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + assert_ready_iterator!(header03a.hash(), pool, [xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt1]); +} + +#[test] +fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, 
xt1.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 0, 2); + assert_future_iterator!(header03a.hash(), pool, [xt0, xt1]); + assert_pool_status!(header03b.hash(), &pool, 0, 2); + assert_future_iterator!(header03b.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header02.hash(), pool, [xt0, xt1]); + + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::<Vec<_>>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::<Vec<_>>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::<Vec<_>>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + assert_future_iterator!(header03a.hash(), pool, []); + assert_future_iterator!(header03b.hash(), pool, []); + assert_future_iterator!(header01.hash(), pool, []); + assert_future_iterator!(header02.hash(), pool, []); + + assert_ready_iterator!(header03a.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); +} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index ed0fd7d4e655..e556ba9875f1 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -80,12 +80,14 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool { create_basic_pool_with_genesis(Arc::from(test_api)).0 } +const TSOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool @@ -99,9 +101,9 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -116,7 +118,7 @@ fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); let (pool, api) = pool(); - 
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 208).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into())) .unwrap(); log::debug!("-> {:?}", pool.validated_pool().status()); @@ -132,7 +134,7 @@ fn late_nonce_should_be_queued() { let (pool, api) = pool(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -141,7 +143,7 @@ fn late_nonce_should_be_queued() { .collect(); assert_eq!(pending, Vec::<Nonce>::new()); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -155,9 +157,9 @@ fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) .unwrap(); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) .unwrap(); let pending: Vec<_> = pool @@ -183,9 +185,9 @@ fn should_ban_invalid_transactions() { let (pool, api) = pool(); let uxt = Arc::from(uxt(Alice, 209)); let hash = - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -196,7 +198,7 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::<Nonce>::new()); // then - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -224,7 +226,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt0 = Arc::from(uxt(Alice, 209)); - block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt0.clone())) + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(api.validation_requests().len(), 1); @@ -242,7 +244,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt1 = uxt(Alice, 211); - block_on(pool.submit_one(&api.expect_hash_and_number(2), SOURCE, xt1.clone().into())) + block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into())) .expect("2. 
Imported"); assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.validated_pool().status().ready, 1); From a2ffae303e8b6c52cff720fb2f3e2019d65d91b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 4 Dec 2024 10:06:57 +0100 Subject: [PATCH 22/29] umbrella: Remove `pallet-revive-fixtures` (#6743) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to have them in the umbrella crate also by having them in the umbrella crate they are bleeding into the normal build. --------- Co-authored-by: GitHub Action Co-authored-by: Alexander Theißen --- Cargo.lock | 1 - prdoc/pr_6743.prdoc | 10 ++++++++++ substrate/frame/revive/fixtures/Cargo.toml | 3 +++ umbrella/Cargo.toml | 8 +------- umbrella/src/lib.rs | 4 ---- 5 files changed, 14 insertions(+), 12 deletions(-) create mode 100644 prdoc/pr_6743.prdoc diff --git a/Cargo.lock b/Cargo.lock index bc2ebb2a057d..863822f4ffd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18577,7 +18577,6 @@ dependencies = [ "pallet-remark 28.0.0", "pallet-revive 0.1.0", "pallet-revive-eth-rpc", - "pallet-revive-fixtures 0.1.0", "pallet-revive-mock-network 0.1.0", "pallet-revive-proc-macro 0.1.0", "pallet-revive-uapi 0.1.0", diff --git a/prdoc/pr_6743.prdoc b/prdoc/pr_6743.prdoc new file mode 100644 index 000000000000..4c35ff46ca67 --- /dev/null +++ b/prdoc/pr_6743.prdoc @@ -0,0 +1,10 @@ +title: 'umbrella: Remove `pallet-revive-fixtures`' +doc: +- audience: Runtime Dev + description: |- + No need to have them in the umbrella crate also by having them in the umbrella crate they are bleeding into the normal build. +crates: +- name: pallet-revive-fixtures + bump: major +- name: polkadot-sdk + bump: major diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 9fd434db6179..88921cca08ec 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -8,6 +8,9 @@ description = "Fixtures for testing and benchmarking" homepage.workspace = true repository.workspace = true +[package.metadata.polkadot-sdk] +exclude-from-umbrella = true + [lints] workspace = true diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 9affcffd2ade..8ed9c3dcb02c 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -120,7 +120,6 @@ std = [ "pallet-recovery?/std", "pallet-referenda?/std", "pallet-remark?/std", - "pallet-revive-fixtures?/std", "pallet-revive-mock-network?/std", "pallet-revive?/std", "pallet-root-offences?/std", @@ -541,7 +540,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", 
"frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", 
"snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", 
"pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -1193,11 +1192,6 @@ path = "../substrate/frame/revive" default-features = false optional = true -[dependencies.pallet-revive-fixtures] -path = "../substrate/frame/revive/fixtures" -default-features = false -optional = true - [dependencies.pallet-revive-proc-macro] path = "../substrate/frame/revive/proc-macro" default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 
2216864fad0f..3712fb3343cf 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -584,10 +584,6 @@ pub use pallet_revive; #[cfg(feature = "pallet-revive-eth-rpc")] pub use pallet_revive_eth_rpc; -/// Fixtures for testing and benchmarking. -#[cfg(feature = "pallet-revive-fixtures")] -pub use pallet_revive_fixtures; - /// A mock network for testing pallet-revive. #[cfg(feature = "pallet-revive-mock-network")] pub use pallet_revive_mock_network; From 377bc3f830a63e277dd94fed490670d1be6d840f Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 4 Dec 2024 11:06:55 +0100 Subject: [PATCH 23/29] [Release|CI/CD] Add pipeline to promote release candidate from rcX to final in S3 (#6748) This PR adds the pipeline that moves release candidate artefacts from the `polkadot-stableYYMM-rcX` bucket to the one that is going to be the final `polkadot-stableYYMM` (the bucket name matches the tag name), so that they can be used for publishing later without the need to re-build them. --- .github/scripts/common/lib.sh | 2 +- .github/scripts/release/release_lib.sh | 44 +++++- .../release-31_promote-rc-to-final.yml | 125 ++++++++++++++++++ .../release-reusable-promote-to-final.yml | 83 ++++++++++++ .../workflows/release-reusable-rc-buid.yml | 4 +- .../workflows/release-reusable-s3-upload.yml | 14 +- 6 files changed, 253 insertions(+), 19 deletions(-) create mode 100644 .github/workflows/release-31_promote-rc-to-final.yml create mode 100644 .github/workflows/release-reusable-promote-to-final.yml diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 41dc0ba06dd2..00f8c089831e 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -297,7 +297,7 @@ fetch_release_artifacts_from_s3() { pwd ls -al --color popd > /dev/null - + unset OUTPUT_DIR } # Pass the name of the binary as input, it will diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index 43227180cb7c..984709f2ea03 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -129,15 +129,17 @@ upload_s3_release() { echo "Working on version: $version " echo "Working on platform: $target " + URL_BASE=$(get_s3_url_base $product) + echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize || true + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true echo "Content to be uploaded:" - artifacts="artifacts/$product/" + artifacts="release-artifacts/$target/$product/" ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/${product}/${version}/${target}" + aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}" echo "Uploaded files:" - aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize - echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}" + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize + echo "✅ The release should be at https://${URL_BASE}/${version}/${target}" } # Upload runtimes artifacts to s3 release bucket @@ -161,3 +163,35 @@ upload_s3_runtimes_release_artifacts() { aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}" } + + +# Pass the name of 
the binary as input, it will +# return the s3 base url +function get_s3_url_base() { + name=$1 + case $name in + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) + printf "releases.parity.io/polkadot" + ;; + + polkadot-parachain) + printf "releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "releases.parity.io/polkadot-omni-node" + ;; + + chain-spec-builder) + printf "releases.parity.io/chain-spec-builder" + ;; + + frame-omni-bencher) + printf "releases.parity.io/frame-omni-bencher" + ;; + *) + printf "UNSUPPORTED BINARY $name" + exit 1 + ;; + esac +} diff --git a/.github/workflows/release-31_promote-rc-to-final.yml b/.github/workflows/release-31_promote-rc-to-final.yml new file mode 100644 index 000000000000..6aa9d4bddd1d --- /dev/null +++ b/.github/workflows/release-31_promote-rc-to-final.yml @@ -0,0 +1,125 @@ +name: Release - Promote RC to final candidate on S3 + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be built for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder + - all + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX + type: string + + +jobs: + + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed == 'true' }} + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + final_tag: ${{ steps.validate_inputs.outputs.final_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . 
./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + promote-polkadot-rc-to-final: + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-parachain-rc-to-final: + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-parachain + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-omni-node-rc-to-final: + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-frame-omni-bencher-rc-to-final: + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-chain-spec-builder-rc-to-final: + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-reusable-promote-to-final.yml b/.github/workflows/release-reusable-promote-to-final.yml new file mode 100644 index 000000000000..ed4a80a01e82 --- /dev/null +++ 
b/.github/workflows/release-reusable-promote-to-final.yml @@ -0,0 +1,83 @@ +name: Promote rc to final + +on: + workflow_call: + inputs: + package: + description: Package to be promoted + required: true + type: string + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX that will be changed to the final form polkadot-stableYYMM(-X) + required: true + type: string + + target: + description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin) + required: true + type: string + + secrets: + AWS_DEFAULT_REGION: + required: true + AWS_RELEASE_ACCESS_KEY_ID: + required: true + AWS_RELEASE_SECRET_ACCESS_KEY: + required: true + +jobs: + + promote-release-artifacts: + environment: release + runs-on: ubuntu-latest + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Prepare final tag + id: prepare_final_tag + shell: bash + run: | + tag="$(echo ${{ inputs.release_tag }} | sed 's/-rc[0-9]*$//')" + echo $tag + echo "FINAL_TAG=${tag}" >> $GITHUB_OUTPUT + + - name: Fetch binaries from s3 based on version + run: | + . ./.github/scripts/common/lib.sh + + VERSION="${{ inputs.release_tag }}" + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + fetch_release_artifacts_from_s3 $package ${{ inputs.target }} + done + else + fetch_release_artifacts_from_s3 ${{ inputs.package }} ${{ inputs.target }} + fi + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ env.AWS_REGION }} + + - name: Upload ${{ inputs.package }} ${{ inputs.target }} artifacts to s3 + run: | + . 
./.github/scripts/release/release_lib.sh + + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + upload_s3_release $package ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + done + else + upload_s3_release ${{ inputs.package }} ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + fi diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index dc1b4553eb9b..0222b2aa91e2 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -133,7 +133,7 @@ jobs: - name: Upload ${{ matrix.binaries }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binaries }} + name: ${{ matrix.binaries }}_${{ inputs.target }} path: /artifacts/${{ matrix.binaries }} build-macos-rc: @@ -285,7 +285,7 @@ jobs: - name: Upload ${{inputs.package }} artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} path: target/production overwrite: true diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml index f85466bc8c07..48c7e53c6c8f 100644 --- a/.github/workflows/release-reusable-s3-upload.yml +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -9,7 +9,7 @@ on: type: string release_tag: - description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM-rcX + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM-rcX required: true type: string @@ -40,18 +40,10 @@ jobs: uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Download amd64 artifacts - if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: ${{ inputs.package }} - path: artifacts/${{ inputs.package }} - - - name: Download arm artifacts - if: ${{ inputs.target == 'aarch64-apple-darwin' }} - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - name: ${{ inputs.package }}_aarch64-apple-darwin - path: artifacts/${{ inputs.package }} + name: ${{ inputs.package }}_${{ inputs.target }} + path: release-artifacts/${{ inputs.target }}/${{ inputs.package }} - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 From 34632ed68272ce2c250cb085c5ab9e53f0a2ced6 Mon Sep 17 00:00:00 2001 From: Javier Viola <363911+pepoviola@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:29:13 +0100 Subject: [PATCH 24/29] Disable flaky tests reported in #6574/#6644 (#6749) Reference issues #6574 #6644 --- .gitlab/pipeline/zombienet/polkadot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 3dab49a118e5..ac4bdac7ad15 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -179,7 +179,7 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0001-basic-3cores-6s-blocks.zndsl" 
-zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: +.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common before_script: @@ -233,7 +233,7 @@ zombienet-polkadot-functional-0015-coretime-shared-core: --local-dir="${LOCAL_DIR}/functional" --test="0016-approval-voting-parallel.zndsl" -zombienet-polkadot-functional-0017-sync-backing: +.zombienet-polkadot-functional-0017-sync-backing: extends: - .zombienet-polkadot-common script: From 5ca726750da563c46449f9aa915296e6c6967e61 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 4 Dec 2024 14:47:47 +0100 Subject: [PATCH 25/29] chore: Update litep2p to v0.8.3 (#6742) ## [0.8.3] - 2024-12-03 This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. ### Fixed - req-resp: Fix memory leak of pending substreams ([#297](https://github.com/paritytech/litep2p/pull/297)) - notification: Fix memory leak of pending substreams ([#296](https://github.com/paritytech/litep2p/pull/296)) cc @paritytech/networking --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- prdoc/pr_6742.prdoc | 11 +++++++++++ substrate/client/network/src/litep2p/mod.rs | 2 +- 4 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_6742.prdoc diff --git a/Cargo.lock b/Cargo.lock index 863822f4ffd5..eee12dc5bc40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10225,9 +10225,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "569e7dbec8a0d4b08d30f4942cd579cfe8db5d3f83f8604abe61697c38d17e73" +checksum = "14e490b5a6d486711fd0284bd30e607a287343f2935a59a9192bd7109e85f443" dependencies = [ "async-trait", "bs58", diff --git a/Cargo.toml b/Cargo.toml index ecc385504181..49fdc198fe33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -848,7 +848,7 @@ linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.8.2", features = ["websocket"] } +litep2p = { version = "0.8.3", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } diff --git a/prdoc/pr_6742.prdoc b/prdoc/pr_6742.prdoc new file mode 100644 index 000000000000..92c3755a3c28 --- /dev/null +++ b/prdoc/pr_6742.prdoc @@ -0,0 +1,11 @@ +title: Update litep2p backend to v0.8.3 +doc: +- audience: Node Dev + description: |- + This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. + While at it, a log message from litep2p has been downgraded. 
+ +crates: +- name: sc-network + bump: patch + diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 6d3575fc2b6b..b6d64b34d64a 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -753,7 +753,7 @@ impl NetworkBackend for Litep2pNetworkBac } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { - log::warn!( + log::debug!( target: LOG_TARGET, "couldn't add known address ({address}) for {peer:?}, unsupported transport" ); From 2779043b0f667b75062cdc085e8052190b78cb20 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 4 Dec 2024 17:43:51 +0100 Subject: [PATCH 26/29] [CI/CD] Fix permissions issue in the backport to stable flow (#6754) This PR has changes to the `command-backport.yml`: - swapped the action that creates backport PRs from master to the stable branches, and added another app with more permissions --- .github/workflows/command-backport.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index eecf0ac72d2c..53dcea2f1d6d 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -29,12 +29,13 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Generate token - id: generate_token - uses: tibdex/github-app-token@v2.1.0 + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 with: - app_id: ${{ secrets.CMD_BOT_APP_ID }} - private_key: ${{ secrets.CMD_BOT_APP_KEY }} + app-id: ${{ vars.RELEASE_AUTOMATION_APP_ID }} + private-key: ${{ secrets.RELEASE_AUTOMATION_APP_PRIVATE_KEY }} + owner: paritytech + - name: Create backport pull requests uses: korthout/backport-action@v3 + with: + target_branches: stable2407 stable2409 stable2412 + merge_commits: skip - github_token: ${{ steps.generate_token.outputs.token }} + github_token: ${{ steps.generate_write_token.outputs.token }} pull_description: | Backport #${pull_number} into `${target_branch}` from ${pull_author}. @@ -86,7 +87,7 @@ jobs: const reviewer = '${{ github.event.pull_request.user.login }}'; for (const pullNumber of pullNumbers) { - await github.pulls.createReviewRequest({ + await github.pulls.requestReviewers({ owner: context.repo.owner, repo: context.repo.repo, pull_number: parseInt(pullNumber), From 82117ad53fc68e8097183e759926b62265ffff0a Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Wed, 4 Dec 2024 18:55:33 +0100 Subject: [PATCH 27/29] wasm-builder: Use riscv32emac-unknown-none-polkavm.json target (#6419) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Closes #6335. 
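As a minimal sketch of the idea (the `RuntimeTarget` enum and the `polkavm_linker::target_json_32_path` helper both appear in the diff below; the free-function form here is purely illustrative), the builder resolves the `--target` argument either to a plain triple that rustc already knows, or to the path of the custom target JSON shipped with `polkavm-linker`:

```rust
// Sketch only; mirrors the change in this patch. Assumes polkavm-linker >= 0.17,
// which bundles the riscv32emac-unknown-none-polkavm.json spec and exposes its path.
enum RuntimeTarget {
    Wasm,
    Riscv,
}

fn rustc_target(target: RuntimeTarget) -> String {
    match target {
        // Upstream target triple, built into rustc.
        RuntimeTarget::Wasm => "wasm32-unknown-unknown".to_string(),
        // Custom target: hand rustc the JSON spec file from polkavm-linker.
        RuntimeTarget::Riscv => polkavm_linker::target_json_32_path()
            .expect("riscv target JSON not found")
            .into_os_string()
            .into_string()
            .unwrap(),
    }
}
```

Note that cargo still names the artifact directory after the target itself (`riscv32emac-unknown-none-polkavm`), not after the JSON path, which is why the diff introduces a separate `rustc_target_dir()` helper.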
## Integration N/A ## Review Notes `RuntimeTarget` is converted to return the path to the custom target JSON file --------- Signed-off-by: Jarkko Sakkinen Co-authored-by: Alexander Theißen Co-authored-by: Koute --- Cargo.lock | 12 +- Cargo.toml | 4 +- prdoc/pr_6419.prdoc | 12 ++ substrate/utils/wasm-builder/src/builder.rs | 3 +- substrate/utils/wasm-builder/src/lib.rs | 105 +++++++----------- .../utils/wasm-builder/src/prerequisites.rs | 7 +- .../utils/wasm-builder/src/wasm_project.rs | 28 +++-- 7 files changed, 87 insertions(+), 84 deletions(-) create mode 100644 prdoc/pr_6419.prdoc diff --git a/Cargo.lock b/Cargo.lock index eee12dc5bc40..dad578ba0c1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14752,7 +14752,7 @@ dependencies = [ "anyhow", "frame-system 28.0.0", "log", - "polkavm-linker 0.17.0", + "polkavm-linker 0.17.1", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -19936,9 +19936,9 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d359dc721d2cc9b555ebb3558c305112ddc5bdac09d26f95f2f7b49c1f2db7e9" +checksum = "0422ead3030d5cde69e2206dbc7d65da872b121876507cd5363f6c6e6aa45157" dependencies = [ "dirs", "gimli 0.31.1", @@ -26495,7 +26495,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.17.0", "rustversion", "secp256k1 0.28.2", "sp-core 28.0.0", @@ -26979,7 +26979,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.17.0", "primitive-types 0.13.1", "rustversion", "sp-core 28.0.0", @@ -28623,7 +28623,7 @@ dependencies = [ "merkleized-metadata", "parity-scale-codec", "parity-wasm", - "polkavm-linker 0.9.2", + "polkavm-linker 0.17.1", "sc-executor 0.32.0", "shlex", "sp-core 28.0.0", diff --git a/Cargo.toml b/Cargo.toml index 49fdc198fe33..383fc46c4e76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1090,8 +1090,8 @@ polkadot-test-client = { path = "polkadot/node/test/client" } polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } polkadot-test-service = { path = "polkadot/node/test/service" } polkavm = { version = "0.9.3", default-features = false } -polkavm-derive = "0.9.1" -polkavm-linker = "0.9.2" +polkavm-derive = "0.17.0" +polkavm-linker = "0.17.1" portpicker = { version = "0.1.1" } pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.13.1", default-features = false, features = [ diff --git a/prdoc/pr_6419.prdoc b/prdoc/pr_6419.prdoc new file mode 100644 index 000000000000..6cc155d64b91 --- /dev/null +++ b/prdoc/pr_6419.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use the custom target riscv32emac-unknown-none-polkavm +doc: + - audience: Runtime Dev + description: | + Closes: https://github.com/paritytech/polkadot-sdk/issues/6335 + +crates: +- name: substrate-wasm-builder + bump: patch diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index a40aafe1d812..5bdc743eac31 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -235,7 +235,8 @@ impl WasmBuilder { /// Build the WASM binary. 
pub fn build(mut self) { - let target = crate::runtime_target(); + let target = RuntimeTarget::new(); + if target == RuntimeTarget::Wasm { if self.export_heap_base { self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 420ecd63e1dc..ce90f492e08f 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -112,7 +112,6 @@ //! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. use std::{ - collections::BTreeSet, env, fs, io::BufRead, path::{Path, PathBuf}, @@ -254,26 +253,22 @@ struct CargoCommand { program: String, args: Vec<String>, version: Option<Version>, - target_list: Option<BTreeSet<String>>, } impl CargoCommand { fn new(program: &str) -> Self { let version = Self::extract_version(program, &[]); - let target_list = Self::extract_target_list(program, &[]); - CargoCommand { program: program.into(), args: Vec::new(), version, target_list } + CargoCommand { program: program.into(), args: Vec::new(), version } } fn new_with_args(program: &str, args: &[&str]) -> Self { let version = Self::extract_version(program, args); - let target_list = Self::extract_target_list(program, args); CargoCommand { program: program.into(), args: args.iter().map(ToString::to_string).collect(), version, - target_list, } } @@ -294,23 +289,6 @@ impl CargoCommand { Version::extract(&version) } - fn extract_target_list(program: &str, args: &[&str]) -> Option<BTreeSet<String>> { - // This is technically an unstable option, but we don't care because we only need this - // to build RISC-V runtimes, and those currently require a specific nightly toolchain - // anyway, so it's totally fine for this to fail in other cases. - let list = Command::new(program) - .args(args) - .args(&["rustc", "-Z", "unstable-options", "--print", "target-list"]) - // Make sure if we're called from within a `build.rs` the host toolchain won't override - // a rustup toolchain we've picked. - .env_remove("RUSTC") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok())?; - - Some(list.trim().split("\n").map(ToString::to_string).collect()) - } - /// Returns the version of this cargo command or `None` if it failed to extract the version. fn version(&self) -> Option<Version> { self.version @@ -326,19 +304,10 @@ impl CargoCommand { fn supports_substrate_runtime_env(&self, target: RuntimeTarget) -> bool { match target { RuntimeTarget::Wasm => self.supports_substrate_runtime_env_wasm(), - RuntimeTarget::Riscv => self.supports_substrate_runtime_env_riscv(), + RuntimeTarget::Riscv => true, } } - /// Check if the supplied cargo command supports our RISC-V runtime environment. - fn supports_substrate_runtime_env_riscv(&self) -> bool { - let Some(target_list) = self.target_list.as_ref() else { return false }; - // This is our custom target which currently doesn't exist on any upstream toolchain, - // so if it exists it's guaranteed to be our custom toolchain and have have everything - // we need, so any further version checks are unnecessary at this point. - target_list.contains("riscv32ema-unknown-none-elf") - } - /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. @@ -409,13 +378,6 @@ fn get_bool_environment_variable(name: &str) -> Option<bool> { } } -/// Returns whether we need to also compile the standard library when compiling the runtime. 
-fn build_std_required() -> bool { - let default = runtime_target() == RuntimeTarget::Wasm; - - crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(default) -} - #[derive(Copy, Clone, PartialEq, Eq)] enum RuntimeTarget { Wasm, @@ -423,36 +385,55 @@ enum RuntimeTarget { } impl RuntimeTarget { - fn rustc_target(self) -> &'static str { + /// Creates a new instance. + fn new() -> Self { + let Some(value) = env::var_os(RUNTIME_TARGET) else { + return Self::Wasm; + }; + + if value == "wasm" { + Self::Wasm + } else if value == "riscv" { + Self::Riscv + } else { + build_helper::warning!( + "RUNTIME_TARGET environment variable must be set to either \"wasm\" or \"riscv\"" + ); + std::process::exit(1); + } + } + + /// Figures out the target parameter value for rustc. + fn rustc_target(self) -> String { match self { - RuntimeTarget::Wasm => "wasm32-unknown-unknown", - RuntimeTarget::Riscv => "riscv32ema-unknown-none-elf", + RuntimeTarget::Wasm => "wasm32-unknown-unknown".to_string(), + RuntimeTarget::Riscv => { + let path = polkavm_linker::target_json_32_path().expect("riscv not found"); + path.into_os_string().into_string().unwrap() + }, } } - fn build_subdirectory(self) -> &'static str { - // Keep the build directories separate so that when switching between - // the targets we won't trigger unnecessary rebuilds. + /// Figures out the target directory name used by cargo. + fn rustc_target_dir(self) -> &'static str { match self { - RuntimeTarget::Wasm => "wbuild", - RuntimeTarget::Riscv => "rbuild", + RuntimeTarget::Wasm => "wasm32-unknown-unknown", + RuntimeTarget::Riscv => "riscv32emac-unknown-none-polkavm", } } -} -fn runtime_target() -> RuntimeTarget { - let Some(value) = env::var_os(RUNTIME_TARGET) else { - return RuntimeTarget::Wasm; - }; + /// Figures out the build-std argument. + fn rustc_target_build_std(self) -> Option<&'static str> { + if !crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) { + return None; + } - if value == "wasm" { - RuntimeTarget::Wasm - } else if value == "riscv" { - RuntimeTarget::Riscv - } else { - build_helper::warning!( - "the '{RUNTIME_TARGET}' environment variable has an invalid value; it must be either 'wasm' or 'riscv'" - ); - std::process::exit(1); + // This is a nightly-only flag. 
+ let arg = match self { + RuntimeTarget::Wasm => "build-std", + RuntimeTarget::Riscv => "build-std=core,alloc", + }; + + Some(arg) } } diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 4de6b87f618d..9abfd1725237 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -196,11 +196,14 @@ fn check_wasm_toolchain_installed( error, colorize_aux_message(&"-".repeat(60)), )) - } + }; } let version = dummy_crate.get_rustc_version(); - if crate::build_std_required() { + + let target = RuntimeTarget::new(); + assert!(target == RuntimeTarget::Wasm); + if target.rustc_target_build_std().is_some() { if let Some(sysroot) = dummy_crate.get_sysroot() { let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 26edd2ea1f22..6530e4c22fb9 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -109,6 +109,15 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { crate_metadata } +/// Keep the build directories separate so that when switching between the +/// targets we won't trigger unnecessary rebuilds. +fn build_subdirectory(target: RuntimeTarget) -> &'static str { + match target { + RuntimeTarget::Wasm => "wbuild", + RuntimeTarget::Riscv => "rbuild", + } +} + /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. /// /// # Returns @@ -125,7 +134,7 @@ pub(crate) fn create_and_compile( #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option<MetadataExtraInfo>, ) -> (Option<WasmBinary>, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); - let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); + let runtime_workspace = runtime_workspace_root.join(build_subdirectory(target)); let crate_metadata = crate_metadata(orig_project_cargo_toml); @@ -770,7 +779,7 @@ impl BuildConfiguration { .collect::<Vec<_>>() .iter() .rev() - .take_while(|c| c.as_os_str() != target.build_subdirectory()) + .take_while(|c| c.as_os_str() != build_subdirectory(target)) .last() .expect("We put the runtime project within a `target/.../[rw]build` path; qed") .as_os_str() @@ -841,9 +850,7 @@ fn build_bloaty_blob( "-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table ", ); }, - RuntimeTarget::Riscv => { - rustflags.push_str("-C target-feature=+lui-addi-fusion -C relocation-model=pie -C link-arg=--emit-relocs -C link-arg=--unique "); - }, + RuntimeTarget::Riscv => (), } rustflags.push_str(default_rustflags); @@ -907,10 +914,9 @@ fn build_bloaty_blob( // // So here we force the compiler to also compile the standard library crates for us // to make sure that they also only use the MVP features. - if crate::build_std_required() { - // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used - // so it's unlikely to break without a replacement. 
- build_cmd.arg("-Z").arg("build-std"); + if let Some(arg) = target.rustc_target_build_std() { + build_cmd.arg("-Z").arg(arg); + if !cargo_cmd.supports_nightly_features() { build_cmd.env("RUSTC_BOOTSTRAP", "1"); } @@ -934,7 +940,7 @@ fn build_bloaty_blob( let blob_name = get_blob_name(target, &manifest_path); let target_directory = project .join("target") - .join(target.rustc_target()) + .join(target.rustc_target_dir()) .join(blob_build_profile.directory()); match target { RuntimeTarget::Riscv => { @@ -968,7 +974,7 @@ fn build_bloaty_blob( }, }; - std::fs::write(&polkavm_path, program.as_bytes()) + std::fs::write(&polkavm_path, program) .expect("writing the blob to a file always works"); } From 654d60c3a373cb5268f50e6cd9274580e12dce87 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 5 Dec 2024 09:42:32 +0100 Subject: [PATCH 28/29] ci: skip check-semver in master and merge queue (#6762) tbd --- .github/workflows/check-semver.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index e9bedd16e6d1..11b386da21e9 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -78,6 +78,11 @@ jobs: - name: check semver run: | + if [ -z "$PR" ]; then + echo "Skipping master/merge queue" + exit 0 + fi + export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 From f4a196ab1473856c9c5992239fcc2f14c2c42914 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 5 Dec 2024 09:54:31 +0100 Subject: [PATCH 29/29] Optimize initialization of networking protocol benchmarks (#6636) # Description These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. ### Integration It should not affect any downstream projects. ### Review Notes - Workers initialize once per benchmark to avoid side effects. - The listen address is assigned when a worker starts. - Benchmarks are divided into two groups by size to create better charts for comparison. --------- Co-authored-by: GitHub Action --- prdoc/pr_6636.prdoc | 9 + .../network/benches/notifications_protocol.rs | 386 ++++++++---------- .../benches/request_response_protocol.rs | 318 ++++++++------- 3 files changed, 353 insertions(+), 360 deletions(-) create mode 100644 prdoc/pr_6636.prdoc diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc new file mode 100644 index 000000000000..1db5fd54d971 --- /dev/null +++ b/prdoc/pr_6636.prdoc @@ -0,0 +1,9 @@ +title: Optimize initialization of networking protocol benchmarks +doc: +- audience: Node Dev + description: |- + These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. 
+ +crates: +- name: sc-network + validate: false diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs index c1e18c7b7f47..40a810d616b5 100644 --- a/substrate/client/network/benches/notifications_protocol.rs +++ b/substrate/client/network/benches/notifications_protocol.rs @@ -25,55 +25,42 @@ use sc_network::{ FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, Params, ProtocolId, Role, SetConfig, }, - service::traits::NotificationEvent, + service::traits::{NetworkService, NotificationEvent}, Litep2pNetworkBackend, NetworkBackend, NetworkWorker, NotificationMetrics, NotificationService, - Roles, + PeerId, Roles, }; use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sc_network_types::build_multiaddr; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{ - net::{IpAddr, Ipv4Addr, TcpListener}, - str::FromStr, -}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; -const MAX_SIZE: u64 = 2u64.pow(30); -const SAMPLE_SIZE: usize = 50; -const NOTIFICATIONS: usize = 50; -const EXPONENTS: &[(u32, &'static str)] = &[ - (6, "64B"), - (9, "512B"), - (12, "4KB"), - (15, "64KB"), - (18, "256KB"), - (21, "2MB"), - (24, "16MB"), - (27, "128MB"), +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), ]; - -// TODO: It's be better to bind system-provided port when initializing the worker -fn get_listen_address() -> sc_network::Multiaddr { - let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); - let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port - let local_addr = listener.local_addr().unwrap(); - let port = local_addr.port(); - - build_multiaddr!(Ip4(ip), Tcp(port)) -} +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), +]; +const MAX_SIZE: u64 = 2u64.pow(30); fn create_network_worker<B, H, N>( - listen_addr: sc_network::Multiaddr, -) -> (N, Box<dyn NotificationService>) +) -> (N, Arc<dyn NetworkService>, Arc<Mutex<Box<dyn NotificationService>>>) where B: BlockT + 'static, H: ExHashT, N: NetworkBackend<B, H>, { let role = Role::Full; - let mut net_conf = NetworkConfiguration::new_local(); - net_conf.listen_addresses = vec![listen_addr]; + let net_conf = NetworkConfiguration::new_local(); let network_config = FullNetworkConfiguration::<B, H, N>::new(&net_conf, None); let genesis_hash = runtime::Hash::zero(); let (block_announce_config, notification_service) = N::notification_config( @@ -110,96 +97,122 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); + let network_service = worker.network_service(); + let notification_service = Arc::new(Mutex::new(notification_service)); - (worker, notification_service) + (worker, network_service, notification_service) } -async fn run_serially<B, H, N>(size: usize, limit: usize) +struct BenchSetup { + notification_service1: Arc<Mutex<Box<dyn NotificationService>>>, + notification_service2: Arc<Mutex<Box<dyn NotificationService>>>, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers<B, H, N>(rt: &tokio::runtime::Runtime) -> Arc<BenchSetup> +where + B: BlockT + 'static, + H: ExHashT, + N: NetworkBackend<B, H>, { - let listen_address1 = 
get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, mut notification_service1) = create_network_worker::<B, H, N>(listen_address1); - let (worker2, mut notification_service2) = - create_network_worker::<B, H, N>(listen_address2.clone()); - let peer_id2: sc_network::PeerId = worker2.network_service().local_peer_id().into(); + let _guard = rt.enter(); - worker1 - .network_service() - .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) - .unwrap(); + let (worker1, network_service1, notification_service1) = create_network_worker::<B, H, N>(); + let (worker2, network_service2, notification_service2) = create_network_worker::<B, H, N>(); + let peer_id2: sc_network::PeerId = network_service2.local_peer_id().into(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (tx, rx) = async_channel::bounded(10); + let ready = tokio::spawn({ + let notification_service1 = Arc::clone(&notification_service1); + let notification_service2 = Arc::clone(&notification_service2); - let network1 = tokio::spawn(async move { - let mut sent_counter = 0; - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - event = notification_service1.next_event() => { - match event { - Some(NotificationEvent::NotificationStreamOpened { .. }) => { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - }, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if sent_counter >= limit { - break; - } - panic!("Unexpected stream closure {:?}", event); - } - event => panic!("Unexpected event {:?}", event), - }; - }, - message = rx.recv() => { - match message { - Ok(Some(_)) => { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - }, - Ok(None) => break, - Err(err) => panic!("Unexpected error {:?}", err), + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1 + .add_reserved_peer(MultiaddrWithPeerId { + multiaddr: listen_address2, + peer_id: peer_id2, + }) + .unwrap(); - } + let mut notification_service1 = notification_service1.lock().await; + let mut notification_service2 = notification_service2.lock().await; + loop { + tokio::select! { + Some(event) = notification_service1.next_event() => { + if let NotificationEvent::NotificationStreamOpened { .. } = event { + break; + } + }, + Some(event) = notification_service2.next_event() => { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + } + }, } } } }); - let network2 = tokio::spawn(async move { - let mut received_counter = 0; - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - event = notification_service2.next_event() => { - match event { - Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - }, - Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, - Some(NotificationEvent::NotificationReceived { .. 
}) => { - received_counter += 1; - if received_counter >= limit { - let _ = tx.send(None).await; - break - } - let _ = tx.send(Some(())).await; - }, - event => panic!("Unexpected event {:?}", event), - }; - }, + + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + peer_id2, + handle1, + handle2, + }) +} + +async fn run_serially(setup: Arc<BenchSetup>, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let _ = tx.send(Some(())).await; + let network1 = tokio::spawn({ + let notification_service1 = Arc::clone(&setup.notification_service1); + let peer_id2 = setup.peer_id2; + async move { + let mut notification_service1 = notification_service1.lock().await; + while let Ok(message) = rx.recv().await { + let Some(_) = message else { break }; + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + } + } + }); + let network2 = tokio::spawn({ + let notification_service2 = Arc::clone(&setup.notification_service2); + async move { + let mut notification_service2 = notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(None).await; + break; + } + let _ = tx.send(Some(())).await; + } } } }); @@ -207,77 +220,34 @@ where let _ = tokio::join!(network1, network2); } -async fn run_with_backpressure<B, H, N>(size: usize, limit: usize) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend<B, H>, -{ - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, mut notification_service1) = create_network_worker::<B, H, N>(listen_address1); - let (worker2, mut notification_service2) = - create_network_worker::<B, H, N>(listen_address2.clone()); - let peer_id2: sc_network::PeerId = worker2.network_service().local_peer_id().into(); - - worker1 - .network_service() - .add_reserved_peer(MultiaddrWithPeerId { multiaddr: listen_address2, peer_id: peer_id2 }) - .unwrap(); - - let network1_run = worker1.run(); - let network2_run = worker2.run(); - - let network1 = tokio::spawn(async move { - let mut sent_counter = 0; - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - event = notification_service1.next_event() => { - match event { - Some(NotificationEvent::NotificationStreamOpened { .. }) => { - while sent_counter < limit { - sent_counter += 1; - notification_service1 - .send_async_notification(&peer_id2, vec![0; size]) - .await - .unwrap(); - } - }, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if sent_counter != limit { panic!("Stream closed unexpectedly") } - break - }, - event => panic!("Unexpected event {:?}", event), - }; - }, +async fn run_with_backpressure(setup: Arc<BenchSetup>, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service1 = setup.notification_service1.lock().await; + for _ in 0..limit { + notification_service1 + .send_async_notification(&setup.peer_id2, vec![0; size]) + .await + .unwrap(); } + let _ = rx.recv().await; } }); - let network2 = tokio::spawn(async move { - let mut received_counter = 0; - tokio::pin!(network2_run); - loop { - tokio::select! 
{ - _ = &mut network2_run => {}, - event = notification_service2.next_event() => { - match event { - Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { - result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); - }, - Some(NotificationEvent::NotificationStreamOpened { .. }) => {}, - Some(NotificationEvent::NotificationStreamClosed { .. }) => { - if received_counter != limit { panic!("Stream closed unexpectedly") } - break - }, - Some(NotificationEvent::NotificationReceived { .. }) => { - received_counter += 1; - if received_counter >= limit { break } - }, - event => panic!("Unexpected event {:?}", event), - }; - }, + let network2 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service2 = setup.notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(()).await; + break; + } + } } } }); @@ -285,64 +255,64 @@ where let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion) { +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("notifications_benchmark"); + let mut group = c.benchmark_group(group); group.plot_config(plot_config); - for &(exponent, label) in EXPONENTS.iter() { + let libp2p_setup = setup_workers::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>(&rt); + for &(exponent, limit, label) in payload.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(NOTIFICATIONS as u64 * size as u64)); - + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( BenchmarkId::new("libp2p/serially", label), - &(size, NOTIFICATIONS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>(size, limit) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); }, ); group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, NOTIFICATIONS), + BenchmarkId::new("libp2p/with_backpressure", label), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>( - size, limit, - ) - }); + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit)); }, ); + } + drop(libp2p_setup); + + let litep2p_setup = setup_workers::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>(&rt); + for &(exponent, limit, label) in payload.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( - BenchmarkId::new("libp2p/with_backpressure", label), - &(size, NOTIFICATIONS), + BenchmarkId::new("litep2p/serially", label), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_with_backpressure::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>( - size, limit, - ) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); }, ); group.bench_with_input( BenchmarkId::new("litep2p/with_backpressure", label), - &(size, NOTIFICATIONS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_with_backpressure::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>( - size, limit, - ) - }); + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit)); }, ); } + drop(litep2p_setup); } -criterion_group!
{ - name = benches; - config = Criterion::default().sample_size(SAMPLE_SIZE); - targets = run_benchmark +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload"); } + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches); diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs index b428d0d75ac5..85381112b753 100644 --- a/substrate/client/network/benches/request_response_protocol.rs +++ b/substrate/client/network/benches/request_response_protocol.rs @@ -25,46 +25,39 @@ use sc_network::{ FullNetworkConfiguration, IncomingRequest, NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, SetConfig, }, + service::traits::NetworkService, IfDisconnected, Litep2pNetworkBackend, NetworkBackend, NetworkRequest, NetworkWorker, - NotificationMetrics, NotificationService, Roles, + NotificationMetrics, NotificationService, PeerId, Roles, }; use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; -use sc_network_types::build_multiaddr; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Zero}; -use std::{ - net::{IpAddr, Ipv4Addr, TcpListener}, - str::FromStr, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; const MAX_SIZE: u64 = 2u64.pow(30); -const SAMPLE_SIZE: usize = 50; -const REQUESTS: usize = 50; -const EXPONENTS: &[(u32, &'static str)] = &[ - (6, "64B"), - (9, "512B"), - (12, "4KB"), - (15, "64KB"), - (18, "256KB"), - (21, "2MB"), - (24, "16MB"), - (27, "128MB"), +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), +]; +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), ]; -fn get_listen_address() -> sc_network::Multiaddr { - let ip = Ipv4Addr::from_str("127.0.0.1").unwrap(); - let listener = TcpListener::bind((IpAddr::V4(ip), 0)).unwrap(); // Bind to a random port - let local_addr = listener.local_addr().unwrap(); - let port = local_addr.port(); - - build_multiaddr!(Ip4(ip), Tcp(port)) -} - -pub fn create_network_worker( - listen_addr: sc_network::Multiaddr, -) -> (N, async_channel::Receiver, Box) +pub fn create_network_worker() -> ( + N, + Arc, + async_channel::Receiver, + Arc>>, +) where B: BlockT + 'static, H: ExHashT, @@ -80,8 +73,7 @@ where Some(tx), ); let role = Role::Full; - let mut net_conf = NetworkConfiguration::new_local(); - net_conf.listen_addresses = vec![listen_addr]; + let net_conf = NetworkConfiguration::new_local(); let mut network_config = FullNetworkConfiguration::new(&net_conf, None); network_config.add_request_response_protocol(request_response_config); let genesis_hash = runtime::Hash::zero(); @@ -119,71 +111,115 @@ where notification_metrics: NotificationMetrics::new(None), }) .unwrap(); + let notification_service = Arc::new(Mutex::new(notification_service)); + let network_service = worker.network_service(); - (worker, rx, notification_service) + (worker, 
network_service, rx, notification_service) } -async fn run_serially(size: usize, limit: usize) +struct BenchSetup { + #[allow(dead_code)] + notification_service1: Arc>>, + #[allow(dead_code)] + notification_service2: Arc>>, + network_service1: Arc, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, + #[allow(dead_code)] + rx1: async_channel::Receiver, + rx2: async_channel::Receiver, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc where B: BlockT + 'static, H: ExHashT, N: NetworkBackend, { - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, _rx1, _notification_service1) = create_network_worker::(listen_address1); - let service1 = worker1.network_service().clone(); - let (worker2, rx2, _notification_service2) = - create_network_worker::(listen_address2.clone()); + let _guard = rt.enter(); + + let (worker1, network_service1, rx1, notification_service1) = + create_network_worker::(); + let (worker2, network_service2, rx2, notification_service2) = + create_network_worker::(); let peer_id2 = worker2.network_service().local_peer_id(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); - worker1.network_service().add_known_address(peer_id2, listen_address2.into()); + let ready = tokio::spawn({ + let network_service1 = Arc::clone(&network_service1); - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (break_tx, break_rx) = async_channel::bounded(10); - let requests = async move { - let mut sent_counter = 0; - while sent_counter < limit { - let _ = service1 - .request( - peer_id2.into(), - "/request-response/1".into(), - vec![0; 2], - None, - IfDisconnected::TryConnect, - ) - .await - .unwrap(); - sent_counter += 1; + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1.add_known_address(peer_id2, listen_address2.into()); } - let _ = break_tx.send(()).await; - }; + }); - let network1 = tokio::spawn(async move { - tokio::pin!(requests); - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - _ = &mut requests => break, + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + network_service1, + peer_id2, + handle1, + handle2, + rx1, + rx2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let network_service1 = Arc::clone(&setup.network_service1); + let peer_id2 = setup.peer_id2; + async move { + for _ in 0..limit { + let _ = network_service1 + .request( + peer_id2.into(), + "/request-response/1".into(), + vec![0; 2], + None, + IfDisconnected::TryConnect, + ) + .await + .unwrap(); } + let _ = break_tx.send(()).await; } }); - let network2 = tokio::spawn(async move { - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - res = rx2.recv() => { - let IncomingRequest { pending_response, .. 
} = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, + let network2 = tokio::spawn({ + let rx2 = setup.rx2.clone(); + async move { + loop { + tokio::select! { + res = rx2.recv() => { + let IncomingRequest { pending_response, .. } = res.unwrap(); + pending_response.send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }).unwrap(); + }, + _ = break_rx.recv() => break, + } } } }); @@ -194,29 +230,12 @@ where // The libp2p request-response implementation does not provide any backpressure feedback. // So this benchmark is useless until we implement it for litep2p. #[allow(dead_code)] -async fn run_with_backpressure(size: usize, limit: usize) -where - B: BlockT + 'static, - H: ExHashT, - N: NetworkBackend, -{ - let listen_address1 = get_listen_address(); - let listen_address2 = get_listen_address(); - let (worker1, _rx1, _notification_service1) = create_network_worker::(listen_address1); - let service1 = worker1.network_service().clone(); - let (worker2, rx2, _notification_service2) = - create_network_worker::(listen_address2.clone()); - let peer_id2 = worker2.network_service().local_peer_id(); - - worker1.network_service().add_known_address(peer_id2, listen_address2.into()); - - let network1_run = worker1.run(); - let network2_run = worker2.run(); - let (break_tx, break_rx) = async_channel::bounded(10); +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); let requests = futures::future::join_all((0..limit).into_iter().map(|_| { let (tx, rx) = futures::channel::oneshot::channel(); - service1.start_request( - peer_id2.into(), + setup.network_service1.start_request( + setup.peer_id2.into(), "/request-response/1".into(), vec![0; 8], None, @@ -227,77 +246,72 @@ where })); let network1 = tokio::spawn(async move { - tokio::pin!(requests); - tokio::pin!(network1_run); - loop { - tokio::select! { - _ = &mut network1_run => {}, - responses = &mut requests => { - for res in responses { - res.unwrap().unwrap(); - } - let _ = break_tx.send(()).await; - break; - }, - } + let responses = requests.await; + for res in responses { + res.unwrap().unwrap(); } + let _ = break_tx.send(()).await; }); let network2 = tokio::spawn(async move { - tokio::pin!(network2_run); - loop { - tokio::select! { - _ = &mut network2_run => {}, - res = rx2.recv() => { - let IncomingRequest { pending_response, .. } = res.unwrap(); - pending_response.send(OutgoingResponse { - result: Ok(vec![0; size]), - reputation_changes: vec![], - sent_feedback: None, - }).unwrap(); - }, - _ = break_rx.recv() => break, - } + for _ in 0..limit { + let IncomingRequest { pending_response, .. 
} = setup.rx2.recv().await.unwrap(); + pending_response + .send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }) + .unwrap(); } + break_rx.recv().await }); let _ = tokio::join!(network1, network2); } -fn run_benchmark(c: &mut Criterion) { +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { let rt = tokio::runtime::Runtime::new().unwrap(); let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); - let mut group = c.benchmark_group("request_response_benchmark"); + let mut group = c.benchmark_group(group); group.plot_config(plot_config); - for &(exponent, label) in EXPONENTS.iter() { + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { let size = 2usize.pow(exponent); - group.throughput(Throughput::Bytes(REQUESTS as u64 * size as u64)); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); group.bench_with_input( BenchmarkId::new("libp2p/serially", label), - &(size, REQUESTS), - |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::>(size, limit) - }); - }, - ); - group.bench_with_input( - BenchmarkId::new("litep2p/serially", label), - &(size, REQUESTS), + &(size, limit), |b, &(size, limit)| { - b.to_async(&rt).iter(|| { - run_serially::( - size, limit, - ) - }); + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); }, ); } + drop(libp2p_setup); + + // TODO: NetworkRequest::request should be implemented for Litep2pNetworkService + let litep2p_setup = setup_workers::(&rt); + // for &(exponent, limit, label) in payload.iter() { + // let size = 2usize.pow(exponent); + // group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + // group.bench_with_input( + // BenchmarkId::new("litep2p/serially", label), + // &(size, limit), + // |b, &(size, limit)| { + // b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); + // }, + // ); + // } + drop(litep2p_setup); } -criterion_group! { - name = benches; - config = Criterion::default().sample_size(SAMPLE_SIZE); - targets = run_benchmark +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload"); } + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); criterion_main!(benches);
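
Review note: both benchmark files apply the same shared-setup pattern. The network workers, which previously bound fresh sockets and re-handshook inside every Criterion iteration, are now built once per backend, kept alive as spawned tasks, and reused, so each iteration measures only the notification or request round-trips. Below is a minimal, self-contained sketch of that pattern, assuming criterion with the `async_tokio` feature plus tokio; `Setup` and `do_roundtrip` are hypothetical stand-ins for the patch's `BenchSetup` and `run_serially`, not the actual implementations.

```rust
use std::sync::Arc;

use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};

/// Hypothetical stand-in for the patch's `BenchSetup`: in the real benchmark
/// this owns the two network services plus the spawned worker tasks.
struct Setup;

fn expensive_setup(rt: &tokio::runtime::Runtime) -> Arc<Setup> {
    // Entering the runtime lets setup code `tokio::spawn` long-lived
    // background tasks (the network workers) outside any single iteration.
    let _guard = rt.enter();
    Arc::new(Setup)
}

/// Hypothetical stand-in for `run_serially`: would push `limit` payloads of
/// `size` bytes over the long-lived connection owned by `setup`.
async fn do_roundtrip(_setup: Arc<Setup>, size: usize, limit: usize) {
    let _ = (size, limit);
}

fn bench(c: &mut Criterion) {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let mut group = c.benchmark_group("shared_setup_example");

    // Built once per backend and reused by every iteration below.
    let setup = expensive_setup(&rt);
    for &(exponent, limit, label) in &[(6u32, 100usize, "64B"), (15, 100, "64KB")] {
        let size = 2usize.pow(exponent);
        group.throughput(Throughput::Bytes(limit as u64 * size as u64));
        group.bench_with_input(
            BenchmarkId::new("serially", label),
            &(size, limit),
            |b, &(size, limit)| {
                b.to_async(&rt).iter(|| do_roundtrip(Arc::clone(&setup), size, limit));
            },
        );
    }
    // Dropped explicitly so a second backend's setup does not race the first;
    // the patch's real `BenchSetup` aborts its worker tasks in `Drop`.
    drop(setup);
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```

The explicit `drop` between backends mirrors the patch: each setup keeps two worker tasks alive on the shared runtime, so tearing one backend down before standing up the next keeps the measurements isolated.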
Release notes

Sourced from lycheeverse/lychee-action's releases.

Version 2.1.0

Full Changelog: https://github.com/lycheeverse/lychee-action/compare/v2...v2.1.0