From 2c35bc3d273f47d45c17078134c048016ef7a57a Mon Sep 17 00:00:00 2001 From: Ignacio Palacios Date: Mon, 4 Sep 2023 12:30:25 +0200 Subject: [PATCH 01/28] [xcm-emulator] Redo Parachain init (#1356) * bring back proper init * refactor block cycle * ".git/.scripts/commands/fmt/fmt.sh" * Update cumulus/xcm/xcm-emulator/src/lib.rs Co-authored-by: Squirrel --------- Co-authored-by: command-bot <> Co-authored-by: Giles Cope --- .../assets/asset-hub-kusama/src/tests/send.rs | 2 - .../asset-hub-polkadot/src/tests/send.rs | 2 - .../asset-hub-polkadot/src/tests/teleport.rs | 2 - .../asset-hub-westend/src/tests/send.rs | 2 - .../bridge-hub-rococo/src/tests/example.rs | 141 +++++++++--------- .../src/tests/fellowship.rs | 3 - cumulus/xcm/xcm-emulator/src/lib.rs | 128 +++++++++------- 7 files changed, 142 insertions(+), 138 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs index 598256db83a0..d633c25b7324 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs @@ -174,8 +174,6 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { PenpalKusamaA::assert_xcm_pallet_sent(); }); - PenpalKusamaA::execute_with(|| {}); - AssetHubKusama::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs index 978377b0fda3..143ab06b4e99 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs @@ -177,8 +177,6 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { PenpalPolkadotA::assert_xcm_pallet_sent(); }); - PenpalPolkadotA::execute_with(|| {}); - AssetHubPolkadot::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs index 166f73137e75..f0fbcf37fccf 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs @@ -187,7 +187,6 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { /// Limited Teleport of native asset from System Parachain to Relay Chain /// should work when there is enough balance in Relay Chain's `CheckAccount` #[test] -#[cfg(feature = "FIXME-IGNORED")] // fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { // Dependency - Relay Chain's `CheckAccount` should have enough balance limited_teleport_native_assets_from_relay_to_system_para_works(); @@ -226,7 +225,6 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { /// Limited Teleport of native asset from System Parachain to Relay Chain /// should't work when there is not enough balance in Relay Chain's `CheckAccount` #[test] -#[cfg(feature = "FIXME-IGNORED")] // fn limited_teleport_native_assets_from_system_para_to_relay_fails() { // Init values for Relay Chain let amount_to_send: Balance = 
ASSET_HUB_POLKADOT_ED * 1000; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs index f2040351e5db..fcaffdabc4cf 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs @@ -116,8 +116,6 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { PenpalWestendA::assert_xcm_pallet_sent(); }); - PenpalWestendA::execute_with(|| {}); - AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs index 127292829fd5..777acd2aa972 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs @@ -14,85 +14,86 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use crate::*; + #[test] -#[ignore] fn example() { - // // Init tests variables - // // XcmPallet send arguments - // let sudo_origin = ::RuntimeOrigin::root(); - // let destination = Rococo::child_location_of(BridgeHubRococo::para_id()).into(); - // let weight_limit = WeightLimit::Unlimited; - // let check_origin = None; + // Init tests variables + // XcmPallet send arguments + let sudo_origin = ::RuntimeOrigin::root(); + let destination = Rococo::child_location_of(BridgeHubRococo::para_id()).into(); + let weight_limit = WeightLimit::Unlimited; + let check_origin = None; - // let remote_xcm = Xcm(vec![ClearOrigin]); + let remote_xcm = Xcm(vec![ClearOrigin]); - // let xcm = VersionedXcm::from(Xcm(vec![ - // UnpaidExecution { weight_limit, check_origin }, - // ExportMessage { - // network: WococoId, - // destination: X1(Parachain(AssetHubWococo::para_id().into())), - // xcm: remote_xcm, - // }, - // ])); + let xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit, check_origin }, + ExportMessage { + network: WococoId, + destination: X1(Parachain(AssetHubWococo::para_id().into())), + xcm: remote_xcm, + }, + ])); - // //Rococo Global Consensus - // // Send XCM message from Relay Chain to Bridge Hub source Parachain - // Rococo::execute_with(|| { - // assert_ok!(::XcmPallet::send( - // sudo_origin, - // bx!(destination), - // bx!(xcm), - // )); + //Rococo Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Rococo::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(xcm), + )); - // type RuntimeEvent = ::RuntimeEvent; + type RuntimeEvent = ::RuntimeEvent; - // assert_expected_events!( - // Rococo, - // vec![ - // RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, - // ] - // ); - // }); - // // Receive XCM message in Bridge Hub source Parachain - // BridgeHubRococo::execute_with(|| { - // type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + // Receive XCM message in Bridge Hub source Parachain + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; - // assert_expected_events!( - // BridgeHubRococo, - // vec![ - // RuntimeEvent::DmpQueue(cumulus_pallet_dmp_queue::Event::ExecutedDownward { - // outcome: Outcome::Complete(_), - // .. - // }) => {}, - // RuntimeEvent::BridgeWococoMessages(pallet_bridge_messages::Event::MessageAccepted { - // lane_id: LaneId([0, 0, 0, 1]), - // nonce: 1, - // }) => {}, - // ] - // ); - // }); + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::DmpQueue(cumulus_pallet_dmp_queue::Event::ExecutedDownward { + outcome: Outcome::Complete(_), + .. + }) => {}, + RuntimeEvent::BridgeWococoMessages(pallet_bridge_messages::Event::MessageAccepted { + lane_id: LaneId([0, 0, 0, 1]), + nonce: 1, + }) => {}, + ] + ); + }); - // // Wococo GLobal Consensus - // // Receive XCM message in Bridge Hub target Parachain - // BridgeHubWococo::execute_with(|| { - // type RuntimeEvent = ::RuntimeEvent; + // Wococo GLobal Consensus + // Receive XCM message in Bridge Hub target Parachain + BridgeHubWococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; - // assert_expected_events!( - // BridgeHubWococo, - // vec![ - // RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, - // ] - // ); - // }); - // // Receive embeded XCM message within `ExportMessage` in Parachain destination - // AssetHubWococo::execute_with(|| { - // type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + // Receive embeded XCM message within `ExportMessage` in Parachain destination + AssetHubWococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; - // assert_expected_events!( - // AssetHubWococo, - // vec![ - // RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Fail { .. }) => {}, - // ] - // ); - // }); + assert_expected_events!( + AssetHubWococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::Fail { .. 
}) => {}, + ] + ); + }); } diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs index 26fa55acb0d6..82e998f5a76c 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs @@ -58,11 +58,8 @@ fn pay_salary() { ); }); - Collectives::execute_with(|| {}); - AssetHubPolkadot::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( AssetHubPolkadot, vec![ diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 59f238528fc1..9fda0632bae4 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -248,6 +248,14 @@ pub trait Parachain: Chain { type ParachainInfo: Get; type ParachainSystem; + fn init(); + + fn new_block(); + + fn finalize_block(); + + fn set_last_head(); + fn para_id() -> ParaId { Self::ext_wrapper(|| Self::ParachainInfo::get()) } @@ -263,8 +271,6 @@ pub trait Parachain: Chain { fn sovereign_account_id_of(location: MultiLocation) -> AccountId { Self::LocationToAccountId::convert_location(&location).unwrap() } - - fn init(); } pub trait Bridge { @@ -603,28 +609,74 @@ macro_rules! decl_test_parachains { type ParachainSystem = $crate::ParachainSystemPallet<::Runtime>; type ParachainInfo = $parachain_info; + // We run an empty block during initialisation to open HRMP channels + // and have them ready for the next block fn init() { use $crate::{Chain, HeadData, Network, NetworkComponent, Hooks, Encode, Parachain, TestExt}; + // Set the last block head for later use in the next block + Self::set_last_head(); + // Initialize a new block + Self::new_block(); + // Finalize the new block + Self::finalize_block(); + } + + fn new_block() { + use $crate::{Chain, HeadData, Network, NetworkComponent, Hooks, Encode, Parachain, TestExt}; + + let para_id = Self::para_id().into(); + + Self::ext_wrapper(|| { + // Increase Relay Chain block number + let mut relay_block_number = <$name as NetworkComponent>::Network::relay_block_number(); + relay_block_number += 1; + <$name as NetworkComponent>::Network::set_relay_block_number(relay_block_number); + + // Initialize a new Parachain block + let mut block_number = ::System::block_number(); + block_number += 1; + let parent_head_data = $crate::LAST_HEAD.with(|b| b.borrow_mut() + .get_mut(::Network::name()) + .expect("network not initialized?") + .get(¶_id) + .expect("network not initialized?") + .clone() + ); + ::System::initialize(&block_number, &parent_head_data.hash(), &Default::default()); + <::ParachainSystem as Hooks<$crate::BlockNumber>>::on_initialize(block_number); + + let _ = ::ParachainSystem::set_validation_data( + ::RuntimeOrigin::none(), + <$name as NetworkComponent>::Network::hrmp_channel_parachain_inherent_data(para_id, relay_block_number, parent_head_data), + ); + }); + } - let para_id = Self::para_id(); + fn finalize_block() { + use $crate::{Chain, Encode, Hooks, Network, NetworkComponent, Parachain, TestExt}; Self::ext_wrapper(|| { let block_number = ::System::block_number(); - let mut relay_block_number = ::Network::relay_block_number(); + ::ParachainSystem::on_finalize(block_number); + }); + + Self::set_last_head(); + } - // Get parent head data - let header = ::System::finalize(); - let parent_head_data = 
HeadData(header.encode()); + fn set_last_head() { + use $crate::{Chain, Encode, HeadData, Network, NetworkComponent, Parachain, TestExt}; + + let para_id = Self::para_id().into(); + + Self::ext_wrapper(|| { + // Store parent head data for use later. + let created_header = ::System::finalize(); $crate::LAST_HEAD.with(|b| b.borrow_mut() .get_mut(::Network::name()) .expect("network not initialized?") - .insert(para_id.into(), parent_head_data.clone()) + .insert(para_id, HeadData(created_header.encode())) ); - - let next_block_number = block_number + 1; - ::System::initialize(&next_block_number, &header.hash(), &Default::default()); - <::ParachainSystem as Hooks<$crate::BlockNumber>>::on_initialize(next_block_number); }); } } @@ -746,43 +798,20 @@ macro_rules! __impl_test_ext_for_parachain { // Make sure the Network is initialized <$name as NetworkComponent>::Network::init(); - let para_id = <$name>::para_id().into(); - - // Initialize block - $local_ext.with(|v| { - v.borrow_mut().execute_with(|| { - let parent_head_data = $crate::LAST_HEAD.with(|b| b.borrow_mut() - .get_mut(::Network::name()) - .expect("network not initialized?") - .get(¶_id) - .expect("network not initialized?") - .clone() - ); - - // Increase block number - let mut relay_block_number = <$name as NetworkComponent>::Network::relay_block_number(); - relay_block_number += 1; - <$name as NetworkComponent>::Network::set_relay_block_number(relay_block_number); - - let _ = ::ParachainSystem::set_validation_data( - ::RuntimeOrigin::none(), - <$name as NetworkComponent>::Network::hrmp_channel_parachain_inherent_data(para_id, relay_block_number, parent_head_data), - ); - }) - }); + // Initialize a new block + Self::new_block(); // Execute let r = $local_ext.with(|v| v.borrow_mut().execute_with(execute)); - // provide inbound DMP/HRMP messages through a side-channel. - // normally this would come through the `set_validation_data`, - // but we go around that. - <$name as NetworkComponent>::Network::process_messages(); + // Finalize the block + Self::finalize_block(); - // Finalize block and send messages if needed + let para_id = <$name>::para_id().into(); + + // Send messages if needed $local_ext.with(|v| { v.borrow_mut().execute_with(|| { - let block_number = ::System::block_number(); let mock_header = $crate::HeaderT::new( 0, Default::default(), @@ -791,16 +820,6 @@ macro_rules! __impl_test_ext_for_parachain { Default::default(), ); - // Finalize to get xcmp messages. - ::ParachainSystem::on_finalize(block_number); - // Store parent head data for use later. - let created_header = ::System::finalize(); - $crate::LAST_HEAD.with(|b| b.borrow_mut() - .get_mut(::Network::name()) - .expect("network not initialized?") - .insert(para_id.into(), $crate::HeadData(created_header.encode())) - ); - let collation_info = ::ParachainSystem::collect_collation_info(&mock_header); // send upward messages @@ -834,11 +853,6 @@ macro_rules! __impl_test_ext_for_parachain { // clean events ::System::reset_events(); - - // reinitialize before next call. 
- let next_block_number = block_number + 1; - ::System::initialize(&next_block_number, &created_header.hash(), &Default::default()); - <::ParachainSystem as Hooks<$crate::BlockNumber>>::on_initialize(next_block_number); }) }); From be761b743ba4c8ac0c5ad39fc71b98547ac9ba37 Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Mon, 4 Sep 2023 23:42:19 +1200 Subject: [PATCH 02/28] add Treasurer to SchedulerOrigin (#1325) --- polkadot/runtime/kusama/src/lib.rs | 3 ++- polkadot/runtime/polkadot/src/lib.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 4b5f03b38c6c..94af807fb5de 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -228,7 +228,8 @@ impl pallet_scheduler::Config for Runtime { type MaximumWeight = MaximumSchedulerWeight; // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of // OpenGov to schedule periodic auctions. - type ScheduleOrigin = EitherOf, AuctionAdmin>; + // Also allow Treasurer to schedule recurring payments. + type ScheduleOrigin = EitherOf, AuctionAdmin>, Treasurer>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = OriginPrivilegeCmp; diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 2efd329eea0c..b71e0f726c55 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -212,7 +212,8 @@ impl pallet_scheduler::Config for Runtime { type MaximumWeight = MaximumSchedulerWeight; // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of // OpenGov to schedule periodic auctions. - type ScheduleOrigin = EitherOf, AuctionAdmin>; + // Also allow Treasurer to schedule recurring payments. 
+ type ScheduleOrigin = EitherOf, AuctionAdmin>, Treasurer>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = OriginPrivilegeCmp; From 2f242e0792dc941c7d3225a1d7c266fde02fc938 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 4 Sep 2023 14:45:33 +0200 Subject: [PATCH 03/28] Cleanup repo (a tiny bit) (#1382) * Delete stale adoc files Signed-off-by: Oliver Tale-Yazdi * Convert adoc to md Signed-off-by: Oliver Tale-Yazdi * Add adoc to gitignore Signed-off-by: Oliver Tale-Yazdi * Delete more random unmaintained files Signed-off-by: Oliver Tale-Yazdi * Markdown lint Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- .gitignore | 1 + cumulus/bridges/rustfmt.toml | 24 - polkadot/RELEASE.md | 52 -- polkadot/doc/release-checklist.md | 91 --- polkadot/node/service/README.adoc | 5 - polkadot/parachain/README.adoc | 5 - polkadot/primitives/README.adoc | 5 - polkadot/runtime/polkadot/README.adoc | 5 - polkadot/src/README.adoc | 5 - polkadot/statement-table/README.adoc | 5 - substrate/.maintain/getgoing.sh | 6 - substrate/.maintain/runtime-dep.py | 34 -- substrate/.maintain/update-deps.sh | 9 - ...ll-completion.adoc => shell-completion.md} | 23 +- substrate/client/cli/README.adoc | 6 - substrate/docs/README.adoc | 522 ------------------ substrate/docs/Structure.adoc | 121 ---- 17 files changed, 9 insertions(+), 910 deletions(-) delete mode 100644 cumulus/bridges/rustfmt.toml delete mode 100644 polkadot/RELEASE.md delete mode 100644 polkadot/doc/release-checklist.md delete mode 100644 polkadot/node/service/README.adoc delete mode 100644 polkadot/parachain/README.adoc delete mode 100644 polkadot/primitives/README.adoc delete mode 100644 polkadot/runtime/polkadot/README.adoc delete mode 100644 polkadot/src/README.adoc delete mode 100644 polkadot/statement-table/README.adoc delete mode 100644 substrate/.maintain/getgoing.sh delete mode 100755 substrate/.maintain/runtime-dep.py delete mode 100755 substrate/.maintain/update-deps.sh rename substrate/bin/node/cli/doc/{shell-completion.adoc => shell-completion.md} (75%) delete mode 100644 substrate/client/cli/README.adoc delete mode 100644 substrate/docs/README.adoc delete mode 100644 substrate/docs/Structure.adoc diff --git a/.gitignore b/.gitignore index bd7f34b48104..35e02e706b42 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ .local .vscode .wasm-binaries +*.adoc *.bin *.iml *.orig diff --git a/cumulus/bridges/rustfmt.toml b/cumulus/bridges/rustfmt.toml deleted file mode 100644 index 082150daf04e..000000000000 --- a/cumulus/bridges/rustfmt.toml +++ /dev/null @@ -1,24 +0,0 @@ -# Basic -hard_tabs = true -max_width = 100 -use_small_heuristics = "Max" -# Imports -imports_granularity = "Crate" -reorder_imports = true -# Consistency -newline_style = "Unix" -# Format comments -comment_width = 100 -wrap_comments = true -# Misc -chain_width = 80 -spaces_around_ranges = false -binop_separator = "Back" -reorder_impl_items = false -match_arm_leading_pipes = "Preserve" -match_arm_blocks = false -match_block_trailing_comma = true -trailing_comma = "Vertical" -trailing_semicolon = false -use_field_init_shorthand = true - diff --git a/polkadot/RELEASE.md b/polkadot/RELEASE.md deleted file mode 100644 index 196f27e595d6..000000000000 --- a/polkadot/RELEASE.md +++ /dev/null @@ -1,52 +0,0 @@ -# Polkadot Release Process - -## Branches -* release-candidate branch: The branch used for staging of the next release. 
Named like `release-v0.8.26` - -## Notes -* The release-candidate branch *must* be made in the `paritytech/polkadot` repo in order for release automation to work -correctly -* Any new pushes/merges to the release-candidate branch (for example, refs/heads/release-v0.8.26) will result in the rc -index being bumped (e.g., v0.8.26-rc1 to v0.8.26-rc2) and new wasm built. - -## Release workflow - -Below are the steps of the release workflow. Steps prefixed with NOACTION are automated and require no human action. - -1. To initiate the release process: - 1. branch master off to a release candidate branch: - `git checkout master; git pull; git checkout -b release-v0.8.26` - 1. In the [Substrate](https://github.com/paritytech/substrate) repo, check out the commit used by Polkadot (this can - be found using the following command in the *Polkadot* repo: `grep 'paritytech/substrate' Cargo.lock | grep -E - '[0-9a-f]{40}' | sort | uniq` - 1. Branch off this **Substrate** commit into its own branch: `git branch -b polkadot-v0.8.26; git push origin - refs/heads/polkadot-v0.8.26` - 1. In the **Polkadot** repository, use [diener](https://github.com/bkchr/diener/) to switch to this branch: `diener - update --branch "polkadot-v0.8.26" --substrate`. Update Cargo.lock (to do this, you can run `cargo build` and then - ctrl+c once it finishes fetching and begins compiling) - 1. Push the **Polkadot** `release-v0.8.26` branch to Github: `git push origin refs/heads/release-v0.8.26` -1. NOACTION: The current HEAD of the release-candidate branch is tagged `v0.8.26-rc1` -1. NOACTION: A draft release and runtime WASMs are created for this release-candidate automatically. A link to the draft - release will be linked in the internal Polkadot matrix channel. -1. NOACTION: A new Github issue is created containing a checklist of manual steps to be completed before we are - confident with the release. This will be linked in Matrix. -1. Complete the steps in the issue created in step 4, signing them off as completed -1. (optional) If a fix is required to the release-candidate: - 1. Merge the fix with `master` first - 1. Cherry-pick the commit from `master` to `release-v0.8.26`, fixing any merge conflicts. Try to avoid unnecessarily - bumping crates. - 1. Push the release-candidate branch to Github - this is now the new release- candidate - 1. Depending on the cherry-picked changes, it may be necessary to perform some or all of the manual tests again. - 1. If there are **Substrate** changes required, these should be cherry-picked to the Substrate `polkadot-v0.8.26` - branch and pushed, and the version of Substrate used in **Polkadot** updated using `cargo update -p sp-io` -1. Once happy with the release-candidate, tag the current top commit in the release candidate branch and push to Github: - `git tag -s -m 'v0.8.26' v0.8.26; git push --tags` -1. NOACTION: The HEAD of the `release` branch will be tagged with `v0.8.26`, and a final draft release will be created - on Github. - -## Security releases - -Occasionally there may be changes that need to be made to the most recently released version of Polkadot, without taking -*every* change to `master` since the last release. For example, in the event of a security vulnerability being found, -where releasing a fixed version is a matter of some expediency. In cases like this, the fix should first be merged with -master, cherry-picked to a branch forked from `release`, tested, and then finally merged with `release`. A sensible -versioning scheme for changes like this is `vX.Y.Z-1`. 
diff --git a/polkadot/doc/release-checklist.md b/polkadot/doc/release-checklist.md deleted file mode 100644 index 8c57791fed1d..000000000000 --- a/polkadot/doc/release-checklist.md +++ /dev/null @@ -1,91 +0,0 @@ - -# Notes - -## Burn In - -Ensure that Parity DevOps has run the new release on Westend, Kusama, and Polkadot validators for at least 12 hours -prior to publishing the release. - -## Build Artifacts - -Add any necessary assets to the release. They should include: - -* Linux binary -* GPG signature of the Linux binary -* SHA256 of binary -* Source code -* Wasm binaries of any runtimes - -## Release notes - -The release notes should list: - -* The priority of the release (i.e., how quickly users should upgrade) - this is based on the max priority of any - *client* changes. -* Which native runtimes and their versions are included -* The proposal hashes of the runtimes as built with [srtool](https://gitlab.com/chevdor/srtool) -* Any changes in this release that are still awaiting audit - -The release notes may also list: - -* Free text at the beginning of the notes mentioning anything important regarding this release -* Notable changes (those labelled with B[1-9]-* labels) separated into sections - -## Spec Version - -A runtime upgrade must bump the spec number. This may follow a pattern with the client release (e.g. runtime v12 -corresponds to v0.8.12, even if the current runtime is not v11). - -## Old Migrations Removed - -Any previous `on_runtime_upgrade` functions from old upgrades must be removed to prevent them from executing a second -time. The `on_runtime_upgrade` function can be found in `runtime//src/lib.rs`. - -## New Migrations - -Ensure that any migrations that are required due to storage or logic changes are included in the `on_runtime_upgrade` -function of the appropriate pallets. - -## Extrinsic Ordering - -Offline signing libraries depend on a consistent ordering of call indices and functions. Compare the metadata of the -current and new runtimes and ensure that the `module index, call index` tuples map to the same set of functions. In case -of a breaking change, increase `transaction_version`. - -To verify the order has not changed, you may manually start the following [Github -Action](https://github.com/paritytech/polkadot/actions/workflows/extrinsic-ordering-check-from-bin.yml). It takes around -a minute to run and will produce the report as artifact you need to manually check. - -The things to look for in the output are lines like: - * `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for `Identity` has changed - * `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets. - * If no indices have changed, every modules line should look something like `[Identity] idx 25 (calls 15)` - -Note: Adding new functions to the runtime does not constitute a breaking change as long as the indexes did not change. - -## Proxy Filtering - -The runtime contains proxy filters that map proxy types to allowable calls. If the new runtime contains any new calls, -verify that the proxy filters are up to date to include them. - -## Benchmarks - -There are three benchmarking machines reserved for updating the weights at release-time. 
To initialise a benchmark run -for each production runtime (`westend`, `kusama`, `polkadot`): -* Go to https://gitlab.parity.io/parity/polkadot/-/pipelines?page=1&scope=branches&ref=master -* Click the link to the last pipeline run for master -* Start each of the manual jobs: - * `update_westend_weights` - * `update_polkadot_weights` - * `update_kusama_weights` -* When these jobs have completed (it takes a few hours), a git PATCH file will be available to download as an artifact. -* On your local machine, branch off master -* Download the patch file and apply it to your branch with `git patch patchfile.patch` -* Commit the changes to your branch and submit a PR against master -* The weights should be (Currently manually) checked to make sure there are no big outliers (i.e., twice or half the - weight). - -## Polkadot JS - -Ensure that a release of [Polkadot JS API](https://github.com/polkadot-js/api) contains any new types or interfaces -necessary to interact with the new runtime. diff --git a/polkadot/node/service/README.adoc b/polkadot/node/service/README.adoc deleted file mode 100644 index 2196d5467806..000000000000 --- a/polkadot/node/service/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Service - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/polkadot/parachain/README.adoc b/polkadot/parachain/README.adoc deleted file mode 100644 index 8650919e64ec..000000000000 --- a/polkadot/parachain/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Parachain - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/polkadot/primitives/README.adoc b/polkadot/primitives/README.adoc deleted file mode 100644 index 0e5c9412f002..000000000000 --- a/polkadot/primitives/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot primitives - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/polkadot/runtime/polkadot/README.adoc b/polkadot/runtime/polkadot/README.adoc deleted file mode 100644 index 33373310819f..000000000000 --- a/polkadot/runtime/polkadot/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Runtime - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/polkadot/src/README.adoc b/polkadot/src/README.adoc deleted file mode 100644 index 4ec8e18d8afe..000000000000 --- a/polkadot/src/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Src - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/polkadot/statement-table/README.adoc b/polkadot/statement-table/README.adoc deleted file mode 100644 index a4da4dee80ff..000000000000 --- a/polkadot/statement-table/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Statement table - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/substrate/.maintain/getgoing.sh b/substrate/.maintain/getgoing.sh deleted file mode 100644 index 98f360837d04..000000000000 --- a/substrate/.maintain/getgoing.sh +++ /dev/null @@ -1,6 +0,0 @@ -/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -brew install openssl cmake -curl https://sh.rustup.rs -sSf | sh -source ~/.cargo/env -cargo install --git https://github.com/paritytech/substrate subkey -cargo install --git https://github.com/paritytech/substrate substrate diff --git a/substrate/.maintain/runtime-dep.py b/substrate/.maintain/runtime-dep.py 
deleted file mode 100755 index 3198bb3e2669..000000000000 --- a/substrate/.maintain/runtime-dep.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 - -# To run this script, you need to install the 'toml' python package and install the 'graphviz' package: -# pip install toml -# sudo apt-get install graphviz -# the first parameter is the runtime folder -# python ./.maintain/runtime-dep.py ./substrate/runtime | dot -Tpng -o output.png -import sys -import os -import toml - -if len(sys.argv) != 2: - print("needs the runtime folder.") - sys.exit(-1) - -runtime_dir = sys.argv[1] - -files = [os.path.join(runtime_dir, f, 'Cargo.toml') for f in os.listdir(runtime_dir) if os.path.isfile(os.path.join(runtime_dir, f, 'Cargo.toml')) and f != 'example'] - -print("digraph G {") - - -PREFIX = "substrate-runtime-" - -for f in files: - manifest = toml.load(f) - - package_name = manifest['package']['name'] - deps = [d for d in manifest['dependencies'].keys() if d.startswith(PREFIX)] - - for d in deps: - print(" \"{}\" -> \"{}\";".format(package_name, d)) - -print("}") diff --git a/substrate/.maintain/update-deps.sh b/substrate/.maintain/update-deps.sh deleted file mode 100755 index cd6b7c853825..000000000000 --- a/substrate/.maintain/update-deps.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -- -set -eu -case $0 in - (/*) dir=${0%/*}/;; - (*/*) dir=./${0%/*};; - (*) dir=.;; -esac - -find "$dir/.." -name Cargo.lock -execdir cargo update \; diff --git a/substrate/bin/node/cli/doc/shell-completion.adoc b/substrate/bin/node/cli/doc/shell-completion.md similarity index 75% rename from substrate/bin/node/cli/doc/shell-completion.adoc rename to substrate/bin/node/cli/doc/shell-completion.md index 168f00994fb2..3f009a04373d 100644 --- a/substrate/bin/node/cli/doc/shell-completion.adoc +++ b/substrate/bin/node/cli/doc/shell-completion.md @@ -1,12 +1,11 @@ +# Shell completion -== Shell completion - -The Substrate cli command supports shell auto-completion. For this to work, you will need to run the completion script matching your build and system. +The Substrate cli command supports shell auto-completion. For this to work, you will need to run the +completion script matching your build and system. 
Assuming you built a release version using `cargo build --release` and use `bash` run the following: -[source, shell] -source target/release/completion-scripts/substrate.bash +`source target/release/completion-scripts/substrate.bash` You can find completion scripts for: - bash @@ -17,25 +16,19 @@ You can find completion scripts for: To make this change persistent, you can proceed as follows: -.First install - -[source, shell] ----- +```shell COMPL_DIR=$HOME/.completion mkdir -p $COMPL_DIR cp -f target/release/completion-scripts/substrate.bash $COMPL_DIR/ echo "source $COMPL_DIR/substrate.bash" >> $HOME/.bash_profile source $HOME/.bash_profile ----- - -.Update +``` When you build a new version of Substrate, the following will ensure your auto-completion script matches the current binary: -[source, shell] ----- +```shell COMPL_DIR=$HOME/.completion mkdir -p $COMPL_DIR cp -f target/release/completion-scripts/substrate.bash $COMPL_DIR/ source $HOME/.bash_profile ----- +``` diff --git a/substrate/client/cli/README.adoc b/substrate/client/cli/README.adoc deleted file mode 100644 index fc58908fdf23..000000000000 --- a/substrate/client/cli/README.adoc +++ /dev/null @@ -1,6 +0,0 @@ - -= Substrate CLI - -Substrate CLI library - -include::doc/shell-completion.adoc[] diff --git a/substrate/docs/README.adoc b/substrate/docs/README.adoc deleted file mode 100644 index 3537e346a66e..000000000000 --- a/substrate/docs/README.adoc +++ /dev/null @@ -1,522 +0,0 @@ -= Substrate -:Author: Substrate developers -:Revision: 0.2.0 -:toc: -:sectnums: - -== Intro in one sentence - -Substrate is a next-generation framework for blockchain innovation. - -== Description - -At its heart, Substrate is a combination of three technologies: https://webassembly.org/[WebAssembly], https://libp2p.io/[Libp2p] and GRANDPA Consensus. About GRANDPA, see this https://hackmd.io/Jd0byWX0RiqFiXUVC78Bdw?view#GRANDPA[definition], https://medium.com/polkadot-network/grandpa-block-finality-in-polkadot-an-introduction-part-1-d08a24a021b5[introduction] and https://github.com/w3f/consensus/blob/master/pdf/grandpa.pdf[formal specification]. It is both a library for building new blockchains and a "skeleton key" of a blockchain client, able to synchronize to any Substrate-based chain. - -Substrate chains have three distinct features that make them "next-generation": a dynamic, self-defining state-transition function; light-client functionality from day one; and a progressive consensus algorithm with fast block production and adaptive, definite finality. The STF, encoded in WebAssembly, is known as the "runtime". This defines the `execute_block` function, and can specify everything from the staking algorithm, transaction semantics, logging mechanisms and procedures for replacing any aspect of itself or of the blockchain's state ("governance"). Because the runtime is entirely dynamic all of these can be switched out or upgraded at any time. A Substrate chain is very much a "living organism". - -See also https://www.parity.io/what-is-substrate/. - -== Usage - -Substrate is still an early stage project, and while it has already been used as the basis of major projects like Polkadot, using it is still a significant undertaking. In particular, you should have a good knowledge of blockchain concepts and basic cryptography. Terminology like header, block, client, hash, transaction and signature should be familiar. 
At present you will need a working knowledge of Rust to be able to do anything interesting (though eventually, we aim for this not to be the case). - -Substrate is designed for use in one of three ways: - -**1. Trivial**: By running the Substrate binary `substrate` and configuring it with a genesis block that includes the current demonstration runtime. In this case, you just build Substrate, configure a JSON file, and launch your own blockchain. This affords you the least amount of customizability, primarily allowing you to change the genesis parameters of the various included runtime modules such as balances, staking, block-period, fees, and governance. - -**2. Modular**: By hacking together pallets built with Substrate FRAME into a new runtime and possibly altering or reconfiguring the Substrate client's block authoring logic. This affords you a very large amount of freedom over your blockchain's logic, letting you change data types, add or remove modules, and crucially, add your own modules. Much can be changed without touching the block authoring logic (since it is generic). If this is the case, then the existing Substrate binary can be used for block authoring and syncing. If the block authoring logic needs to be tweaked, then a new, altered block authoring binary must be built as a separate project and used by validators. This is how the Polkadot relay chain is built and should suffice for almost all circumstances in the near to mid-term. - -**3. Generic**: The entire FRAME can be ignored and the entire runtime designed and implemented from scratch. If desired, this can be done in a language other than Rust, provided it can target WebAssembly. If the runtime can be made compatible with the existing client's block authoring logic, then you can simply construct a new genesis block from your Wasm blob and launch your chain with the existing Rust-based Substrate client. If not, then you'll need to alter the client's block authoring logic accordingly. This is probably a useless option for most projects right now, but provides complete flexibility allowing for a long-term, far-reaching upgrade path for the Substrate paradigm. - -=== The Basics of Substrate - -Substrate is a blockchain platform with a completely generic state transition function. That said, it does come with both standards and conventions (particularly regarding the Runtime Module Library) regarding underlying data structures. Roughly speaking, these core data types correspond to +trait+s in terms of the actual non-negotiable standard and generic +struct+s in terms of the convention. - -``` -Header := Parent + ExtrinsicsRoot + StorageRoot + Digest -Block := Header + Extrinsics + Justifications -``` - -=== Extrinsics - -Extrinsics in Substrate are pieces of information from "the outside world" that are contained in the blocks of the chain. You might think "ahh, that means *transactions*": in fact, no. Extrinsics fall into two broad categories of which only one is *transactions*. The other is known as *inherents*. The difference between these two is that transactions are signed and gossiped on the network and can be deemed useful *per se*. This fits the mold of what you would call transactions in Bitcoin or Ethereum. - -Inherents, meanwhile, are not passed on the network and are not signed. They represent data which describes the environment but which cannot call upon anything to prove it such as a signature. 
Rather they are assumed to be "true" simply because a sufficiently large number of validators have agreed on them being reasonable. - -To give an example, there is the timestamp inherent, which sets the current timestamp of the block. This is not a fixed part of Substrate, but does come as part of FRAME to be used as desired. No signature could fundamentally prove that a block were authored at a given time in quite the same way that a signature can "prove" the desire to spend some particular funds. Rather, it is the business of each validator to ensure that they believe the timestamp is set to something reasonable before they agree that the block candidate is valid. - -Other examples include the parachain-heads extrinsic in Polkadot and the "note-missed-proposal" extrinsic used in FRAME to determine and punish or deactivate offline validators. - - -=== Runtime and API - -Substrate chains all have a runtime. The runtime is a WebAssembly "blob" that includes a number of entry-points. Some entry-points are required as part of the underlying Substrate specification. Others are merely convention and required for the default implementation of the Substrate client to be able to author blocks. - -If you want to develop a chain with Substrate, you will need to implement the `Core` trait. This `Core` trait generates an API with the minimum necessary functionality to interact with your runtime. A special macro is provided called `impl_runtime_apis!` that help you implement runtime API traits. All runtime API trait implementations need to be done in one call of the `impl_runtime_apis!` macro. All parameters and return values need to implement https://crates.io/crates/parity-codec[`parity-codec`] to be encodable and decodable. - -Here's a snippet of the Polkadot API implementation as of PoC-3: - -```rust -impl_runtime_apis! { - impl client_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: ::Header) { - Executive::initialize_block(&header) - } - } - // ---snip--- -} -``` - - -=== Inherent Extrinsics - -Substrate FRAME includes functionality for timestamps and slashing. If used, these rely on "trusted" external information being passed in via inherent extrinsics. The Substrate reference block authoring client software will expect to be able to call into the runtime API with collated data (in the case of the reference Substrate authoring client, this is merely the current timestamp and which nodes were offline) in order to return the appropriate extrinsics ready for inclusion. If new inherent extrinsic types and data are to be used in a modified runtime, then it is this function (and its argument type) that would change. - -=== Block-authoring Logic - -In Substrate, there is a major distinction between blockchain *syncing* and block *authoring* ("authoring" is a more general term for what is called "mining" in Bitcoin). The first case might be referred to as a "full node" (or "light node" - Substrate supports both): authoring necessarily requires a synced node and, therefore, all authoring clients must necessarily be able to synchronize. However, the reverse is not true. The primary functionality that authoring nodes have which is not in "sync nodes" is threefold: transaction queue logic, inherent transaction knowledge and BFT consensus logic. 
BFT consensus logic is provided as a core element of Substrate and can be ignored since it is only exposed in the SDK under the `authorities()` API entry. - -Transaction queue logic in Substrate is designed to be as generic as possible, allowing a runtime to express which transactions are fit for inclusion in a block through the `initialize_block` and `apply_extrinsic` calls. However, more subtle aspects like prioritization and replacement policy must currently be expressed "hard coded" as part of the blockchain's authoring code. That said, Substrate's reference implementation for a transaction queue should be sufficient for an initial chain implementation. - -Inherent extrinsic knowledge is again somewhat generic, and the actual construction of the extrinsics is, by convention, delegated to the "soft code" in the runtime. If ever there needs to be additional extrinsic information in the chain, then both the block authoring logic will need to be altered to provide it into the runtime and the runtime's `inherent_extrinsics` call will need to use this extra information in order to construct any additional extrinsic transactions for inclusion in the block. - -== Roadmap - -=== So far - -- 0.1 "PoC-1": PBFT consensus, Wasm runtime engine, basic runtime modules. -- 0.2 "PoC-2": Libp2p - -=== In progress - -- AfG consensus -- Improved PoS -- Smart contract runtime module - -=== The future - -- Splitting out runtime modules into separate repo -- Introduce substrate executable (the skeleton-key runtime) -- Introduce basic but extensible transaction queue and block-builder and place them in the executable. -- DAO runtime module -- Audit - -== Trying out Substrate Node - -Substrate Node is Substrate's pre-baked blockchain client. You can run a development node locally or configure a new chain and launch your own global testnet. - -=== On Mac and Ubuntu - -To get going as fast as possible, there is a simple script that installs all required dependencies and installs Substrate into your path. Just open a terminal and run: - -[source, shell] ----- -curl https://getsubstrate.io -sSf | bash ----- - -You can start a local Substrate development chain with running `substrate --dev`. - -To create your own global network/cryptocurrency, you'll need to make a new Substrate Node chain specification file ("chainspec"). - -First let's get a template chainspec that you can edit. We'll use the "staging" chain, a sort of default chain that the node comes pre-configured with: - -[source, shell] ----- -substrate build-spec --chain=staging > ~/chainspec.json ----- - -Now, edit `~/chainspec.json` in your editor. There are a lot of individual fields for each module, and one very large one which contains the WebAssembly code blob for this chain. The easiest field to edit is the block `period`. 
Change it to 10 (seconds): - -[source, json] ----- - "timestamp": { - "minimumPeriod": 10 - }, ----- - -Now with this new chainspec file, you can build a "raw" chain definition for your new chain: - -[source, shell] ----- -substrate build-spec --chain ~/chainspec.json --raw > ~/mychain.json ----- - -This can be fed into Substrate: - -[source, shell] ----- -substrate --chain ~/mychain.json ----- - -It won't do much until you start producing blocks though, so to do that you'll need to use the `--validator` option together with passing the seed for the account(s) that is configured to be the initial authorities: - -[source, shell] ----- -substrate --chain ~/mychain.json --validator ----- - -You can distribute `mychain.json` so that everyone can synchronize and (depending on your authorities list) validate on your chain. - - -== Building - -=== Hacking on Substrate - -If you'd actually like to hack on Substrate, you can just grab the source code and -build it. Ensure you have Rust and the support software installed: - -==== Linux and Mac - -For Unix-based operating systems, you should run the following commands: - -[source, shell] ----- -curl https://sh.rustup.rs -sSf | sh - -rustup update nightly -rustup target add wasm32-unknown-unknown --toolchain nightly -rustup update stable ----- - -You will also need to install the following packages: - - - Linux: -[source, shell] -sudo apt install cmake pkg-config libssl-dev git clang libclang-dev llvm - -- Linux on ARM: -`rust-lld` is required for linking wasm, but is missing on non Tier 1 platforms. -So, use this https://github.com/Plume-org/Plume/blob/master/script/wasm-deps.sh[script] -to build `lld` and create the symlink `/usr/bin/rust-lld` to the build binary. - - - Mac: -[source, shell] -brew install cmake pkg-config openssl git llvm - -To finish installation of Substrate, jump down to <>. - -==== Windows - -If you are trying to set up Substrate on Windows, you should do the following: - -1. First, you will need to download and install "Build Tools for Visual Studio:" - - * You can get it at this link: https://aka.ms/buildtools - * Run the installation file: `vs_buildtools.exe` - * Please ensure the Windows 10 SDK component is included when installing the Visual C++ Build Tools. - * image:https://i.imgur.com/zayVLmu.png[image] - * Restart your computer. - -2. Next, you need to install Rust: - - * Detailed instructions are provided by the https://doc.rust-lang.org/book/ch01-01-installation.html#installing-rustup-on-windows[Rust Book]. - * Download from: https://www.rust-lang.org/tools/install - * Run the installation file: `rustup-init.exe` - > Note that it should not prompt you to install vs_buildtools since you did it in step 1. - * Choose "Default Installation." - * To get started, you need Cargo's bin directory (%USERPROFILE%\.cargo\bin) in your PATH environment variable. Future applications will automatically have the correct environment, but you may need to restart your current shell. - -3. Then, you will need to run some commands in CMD to set up your Wasm Build Environment: - - rustup update nightly - rustup update stable - rustup target add wasm32-unknown-unknown --toolchain nightly - -4. Then, you need to install LLVM: https://releases.llvm.org/download.html - -5. Next, you need to install OpenSSL, which we will do with `vcpkg`: - - mkdir \Tools - cd \Tools - git clone https://github.com/Microsoft/vcpkg.git - cd vcpkg - .\bootstrap-vcpkg.bat - .\vcpkg.exe install openssl:x64-windows-static - -6. 
After, you need to add OpenSSL to your System Variables. Note that in order for the following commands to work, you need to use Windows Powershell: - - $env:OPENSSL_DIR = 'C:\Tools\vcpkg\installed\x64-windows-static' - $env:OPENSSL_STATIC = 'Yes' - [System.Environment]::SetEnvironmentVariable('OPENSSL_DIR', $env:OPENSSL_DIR, [System.EnvironmentVariableTarget]::User) - [System.Environment]::SetEnvironmentVariable('OPENSSL_STATIC', $env:OPENSSL_STATIC, [System.EnvironmentVariableTarget]::User) - -7. Finally, you need to install `cmake`: https://cmake.org/download/ - -==== Docker - -You can use https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-linux[Parity CI docker image] with all necessary dependencies to build Substrate: - -[source, shell] ----- -#run it in the folder with the Substrate source code -docker run --rm -it -w /shellhere/substrate \ - -v $(pwd):/shellhere/substrate \ - paritytech/ci-linux:production ----- - -You can find necessary cargo commands in <> - -==== Shared Steps - -Then, grab the Substrate source code: - -[source, shell] ----- -git clone https://github.com/paritytech/substrate.git -cd substrate ----- - -Then build the code: - -[source, shell] ----- -cargo build # Builds all native code ----- - -You can run all the tests if you like: - -[source, shell] -cargo test --all - -Or just run the tests of a specific package (i.e. `cargo test -p pallet-assets`) - -You can start a development chain with: - -[source, shell] -cargo run --release -- --dev - -Detailed logs may be shown by running the node with the following environment variables set: `RUST_LOG=debug RUST_BACKTRACE=1 cargo run --release \-- --dev`. - -If you want to see the multi-node consensus algorithm in action locally, then you can create a local testnet with two validator nodes for Alice and Bob, who are the initial authorities of the genesis chain specification that have been endowed with a testnet DOTs. We'll give each node a name and expose them so they are listed on link:https://telemetry.polkadot.io/#/Local%20Testnet[Telemetry]. You'll need two terminal windows open. - -We'll start Alice's Substrate node first on default TCP port 30333 with their chain database stored locally at `/tmp/alice`. The Bootnode ID of Alice's node is `QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR`, which is generated from the `--node-key` value that we specify below: - -[source, shell] -cargo run --release \-- \ - --base-path /tmp/alice \ - --chain=local \ - --alice \ - --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ - --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ - --validator - -In the second terminal, we'll run the following to start Bob's Substrate node on a different TCP port of 30334, and with their chain database stored locally at `/tmp/bob`. We'll specify a value for the `--bootnodes` option that will connect Bob's node to Alice's Bootnode ID on TCP port 30333: - -[source, shell] -cargo run --release \-- \ - --base-path /tmp/bob \ - --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR \ - --chain=local \ - --bob \ - --port 30334 \ - --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ - --validator - -Additional Substrate CLI usage options are available and may be shown by running `cargo run \-- --help`. - -[[flaming-fir]] -=== Joining the Flaming Fir Testnet - -Flaming Fir is the new testnet for Substrate master (2.0) to test the latest development features. 
Please note that master is not compatible with the BBQ Birch, Charred Cherry, Dried Danta or Emberic Elm testnets. Ensure you have the dependencies listed above before compiling. - -Since Flaming Fir is targeting the master branch we make absolutely no guarantees of stability and/or persistence of the network. We might reset the chain at any time if it is necessary to deploy new changes. Currently, the validators are running with a client built from `d013bd900`, if you build from this commit you should be able to successfully sync, later commits may not work as new breaking changes may be introduced in master. - -Latest known working version: `a2a0eb5398d6223e531455b4c155ef053a4a3a2b` - -[source, shell] ----- -git clone https://github.com/paritytech/substrate.git -cd substrate -git checkout -b flaming-fir a2a0eb5398d6223e531455b4c155ef053a4a3a2b ----- - -You can run the tests if you like: - -[source, shell] -cargo test --all - -Start your node: - -[source, shell] -cargo run --release \-- - -To see a list of command line options, enter: - -[source, shell] -cargo run --release \-- --help - -For example, you can choose a custom node name: - -[source, shell] -cargo run --release \-- --name my_custom_name - -If you are successful, you will see your node syncing at https://telemetry.polkadot.io/#/Flaming%20Fir - -=== Joining the Emberic Elm Testnet - -Emberic Elm is the testnet for Substrate 1.0. Please note that 1.0 is not compatible with the BBQ Birch, Charred Cherry, Dried Danta or Flaming Fir testnets. -In order to join the Emberic Elm testnet you should build from the `v1.0` branch. Ensure you have the dependencies listed above before compiling. - -[source, shell] ----- -git clone https://github.com/paritytech/substrate.git -cd substrate -git checkout -b v1.0 origin/v1.0 ----- - -You can then follow the same steps for building and running as described above in <>. - -== Key management - -Keys in Substrate are stored in the keystore in the file system. To store keys into this keystore, -you need to use one of the two provided RPC calls. If your keys are encrypted or should be encrypted -by the keystore, you need to provide the key using one of the cli arguments `--password`, -`--password-interactive` or `--password-filename`. - -=== Recommended RPC call - -For most users who want to run a validator node, the `author_rotateKeys` RPC call is sufficient. -The RPC call will generate `N` Session keys for you and return their public keys. `N` is the number -of session keys configured in the runtime. The output of the RPC call can be used as input for the -`session::set_keys` transaction. - -``` -curl -H 'Content-Type: application/json' --data '{ "jsonrpc":"2.0", "method":"author_rotateKeys", "id":1 }' localhost:9933 -``` - -=== Advanced RPC call - -If the Session keys need to match a fixed seed, they can be set individually key by key. The RPC call -expects the key seed and the key type. The key types supported by default in Substrate are listed -https://github.com/paritytech/substrate/blob/master/core/primitives/src/crypto.rs#L767[here], but the -user can declare any key type. - -``` -curl -H 'Content-Type: application/json' --data '{ "jsonrpc":"2.0", "method":"author_insertKey", "params":["KEY_TYPE", "SEED", "PUBLIC"],"id":1 }' localhost:9933 -``` - -`KEY_TYPE` - needs to be replaced with the 4-character key type identifier. -`SEED` - is the seed of the key. -`PUBLIC` - public key for the given key. 
- -== Documentation - -=== Viewing documentation for Substrate packages - -You can generate documentation for a Substrate Rust package and have it automatically open in your web browser using https://doc.rust-lang.org/rustdoc/what-is-rustdoc.html#using-rustdoc-with-cargo[rustdoc with Cargo], -(from The Rustdoc Book), by running the following command: - -``` -cargo doc --package --open -``` - -Replacing `` with one of the following (i.e. `cargo doc --package substrate --open`): - -* All Substrate Packages -[source, shell] -substrate -* Substrate Core -[source, shell] -substrate, substrate-cli, substrate-client, substrate-client-db, -substrate-consensus-common, substrate-consensus-rhd, -substrate-executor, substrate-finality-grandpa, substrate-keyring, substrate-keystore, substrate-network, -substrate-network-libp2p, substrate-primitives, substrate-rpc, substrate-rpc-servers, -substrate-serializer, substrate-service, substrate-service-test, substrate-state-db, -substrate-state-machine, substrate-telemetry, substrate-test-client, -substrate-test-runtime, substrate-transaction-graph, sp-transaction-pool, -substrate-trie -* Substrate Runtime -[source, shell] -sr-api, sr-io, sr-primitives, sr-sandbox, sr-std, sr-version -* FRAME Core -[source, shell] -frame-metadata, frame-support, frame-system -* FRAME Pallets -[source, shell] -pallet-assets, pallet-balances, pallet-consensus, pallet-contracts, pallet-council, pallet-democracy, pallet-example, -frame-executive, pallet-session, pallet-staking, pallet-timestamp, pallet-treasury -* Node -[source, shell] -node-cli, node-consensus, node-executor, node-network, node-primitives, kitchensink-runtime -* Subkey -[source, shell] -subkey - -=== Contributing to documentation for Substrate packages - -https://doc.rust-lang.org/1.9.0/book/documentation.html[Document source code] for Substrate packages by annotating the source code with documentation comments. - -Example (generic): -```markdown -/// Summary -/// -/// Description -/// -/// # Panics -/// -/// # Errors -/// -/// # Safety -/// -/// # Examples -/// -/// Summary of Example 1 -/// -/// ```rust -/// // insert example 1 code here -/// ``` -/// -``` - -* Important notes: -** Documentation comments must use annotations with a triple slash `///` -** Modules are documented using `//!` -``` -//! Summary (of module) -//! -//! Description (of module) -``` -* A special section header is indicated with a hash `#`. -** `Panics` section requires an explanation if the function triggers a panic -** `Errors` section is for describing conditions under which a function or method returns `Err(E)` if it returns a `Result` -** `Safety` section requires an explanation if the function is `unsafe` -** `Examples` section includes examples of using the function or method -* Code block annotations for examples are included between triple graves, as shown above. -Instead of including the programming language to use for syntax highlighting as the annotation -after the triple graves, alternative annotations include `ignore`, `text`, `should_panic`, or `no_run`.
-* Summary sentence is a short high level single sentence of its functionality -* Description paragraph is for details additional to the summary sentence -* Missing documentation annotations may be used to identify where to generate warnings with `#![warn(missing_docs)]` -or errors `#![deny(missing_docs)]` -* Hide documentation for items with `#[doc(hidden)]` - -=== Contributing to documentation (tests, extended examples, macros) for Substrate packages - -The code block annotations in the `# Example` section may be used as https://doc.rust-lang.org/1.9.0/book/documentation.html#documentation-as-tests[documentation as tests and for extended examples]. - -* Important notes: -** Rustdoc will automatically add a `main()` wrapper around the code block to test it -** https://doc.rust-lang.org/1.9.0/book/documentation.html#documenting-macros[Documenting macros]. -** Documentation as tests examples are included when running `cargo test` - -== Contributing - -=== Contributing Guidelines - -include::CONTRIBUTING.md[] - -=== Contributor Code of Conduct - -include::CODE_OF_CONDUCT.md[] - -== License - -https://github.com/paritytech/substrate/blob/master/LICENSE[LICENSE] diff --git a/substrate/docs/Structure.adoc b/substrate/docs/Structure.adoc deleted file mode 100644 index 6c810a83c51b..000000000000 --- a/substrate/docs/Structure.adoc +++ /dev/null @@ -1,121 +0,0 @@ -= Structure -:Author: Substrate developers -:Revision: 0.3.0 -:toc: -:sectnums: - - -== Overview - -Substrate is split into multiple levels with increasing opinion and decreasing flexibility: - -* primitives -* client -* FRAME (formerly `srml`) - -Putting all these components together we have: - -* Integration Tests -* Node -* Node template -* Subkey - -=== Runtime - -* _found in_: `/primitives` -* _crates prefix_: `sp-` -* _constraints_: -** must be `[no_std]` -** crates may not (dev-)depend on crates in other subfolders of this repo - -In the lowest level, Substrate defines primitives, interfaces and traits to implement any on-chain Substrate transition system and its interactions with the outside world. This is the lowest level of abstraction and opinion that everything else builds upon. - -=== Client - -* _found in_: `/client` -* _crates prefix_: `sc-` -* _constraints_: -** crates may not (dev-)depend on any `frame-`-crates - -In the client you can find a set of crates to construct the outer substrate-node, implementing outer runtime interfaces, thus it depends on `runtime`. It provides the outer building blocks like transaction queue, networking layer, database backend, full* and light-client support. - -=== FRAME (formerly `srml`) - -* _found in_: `/frame` -* _crates prefix_: `frame-` and `pallet-` -* _constraints_: -** all crates that go on chain must be `[no_std]` -** must not (dev-)depend on anything in `/client` - -FRAME is a set of modules that implement specific transition functions and features one might want to have in their runtime. - -_Pallets_ are individual modules within _FRAME._ These are containers that host domain-specific logic. They have the `pallet-` prefix. For example, `pallet-staking` contains logic for staking tokens. - -There are a few crates with the `frame-` prefix. These do not contain domain-specific logic. Rather, they are the main FRAME support infrastructure. 
These are: - -- Executive -- Metadata -- Support -- System -- Utility - -=== Integration Tests - -* _found in_: `/test` -* _crates prefix_: `substrate-test` -* _constraints_: -** only helpers may be published -** purely testing crates must be `publish = false` - -All tests that have to pull (dev)-dependencies out of their subtree and would thus break the dependency rules are considered integration tests and should be stored in here. Only helper-crates in here shall be published, everything else is expected to be non-publish. - -=== Binaries and template - -* _found in_: `/bin` - -We also provide some binaries pulling from the components creating full applications. - -==== Node - -* _found in_: `/bin/node` - -The default (testing) application pulling together our recommended setup of substrate-client with a wasm-contracts-supporting frame-runtime. The node pulls it all together, constructs the (upgradable) runtime, and wires up the client around it. You can find an example client, which includes a full wasm-contracts chain in `node`. This is also what is being built and run if you do `cargo run`. - -==== Node Template - -* _found in_: `/bin/node-template` - -We also provide a template to get you started building your own node. - -==== Utils - -* _found in_: `/bin/utils` - -- **subkey** - Subkey is a client library to generate keys and sign transactions to send to a substrate node. -- **chain-spec-builder** - The chain spec builder builds a chain specification that includes a Substrate runtime compiled as WASM. To ensure proper functioning of the included runtime compile (or run) the chain spec builder binary in `--release` mode. - -== Internal Dependency Tree - -[ditaa] -.... -+---------------+ +----------------+ -| | | | -| runtime +<------+ frame | -| | | | -+------+-----+--+ +-------------+--+ - ^ ^ ^ - | +----------------+ | - | | | -+------+--------+ | | -| | | | -| client | +--+-------+--------+ -| +<---------+ | -+---------------+ | | - | test /bin/* | - | | - | | - +-------------------+ - -.... 
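The layering constraints above can be spot-checked with `cargo tree`. A minimal sketch, assuming it is run from the repository root and that the chosen crate names exist in the workspace; empty output means the corresponding rule holds:

[source, shell]
----
# A client (`sc-`) crate must not pull in any FRAME crates.
cargo tree -p sc-network | grep -E 'frame-|pallet-'

# A primitives (`sp-`) crate must not depend on client crates.
cargo tree -p sp-runtime | grep 'sc-'
----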
From 89bfdb15dab74359b91011578efa204ea9884b07 Mon Sep 17 00:00:00 2001 From: ordian Date: Mon, 4 Sep 2023 16:48:12 +0200 Subject: [PATCH 04/28] approval-voting: use proper hash when querying session info (#1387) --- polkadot/node/core/approval-voting/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 0087f8e14350..ddef736feab7 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -2350,7 +2350,7 @@ async fn process_wakeup( match get_extended_session_info( session_info_provider, ctx.sender(), - block_entry.parent_hash(), + block_entry.block_hash(), block_entry.session(), ) .await From ddab7156b4d579516d81a8ae3b6233250bd2ba45 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 4 Sep 2023 16:58:32 +0200 Subject: [PATCH 05/28] [ci] Remove runtime-benchmarks from tests (#1335) * [ci] Remove runtime-benchmarks from tests * Update .gitlab/pipeline/test.yml Co-authored-by: Oliver Tale-Yazdi * remove ing * move benchmark tests to additional tests * rm -q option * try release profile * use testnet profile * move to a separate job * rm dup --------- Co-authored-by: Oliver Tale-Yazdi --- .gitlab/pipeline/test.yml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index c81750d49f92..d128cb099021 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -34,7 +34,7 @@ test-linux-stable: --locked \ --release \ --no-fail-fast \ - --features runtime-benchmarks,try-runtime,experimental \ + --features try-runtime,experimental \ --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} # run runtime-api tests with `enable-staging-api` feature on the 1st node - if [ ${CI_NODE_INDEX} == 1 ]; then time cargo nextest run -p sp-api-test --features enable-staging-api; fi @@ -92,7 +92,22 @@ test-linux-stable-additional-tests: --locked \ --release \ --features runtime-benchmarks,try-runtime - allow_failure: true + +# https://github.com/paritytech/ci_cd/issues/864 +test-linux-stable-runtime-benchmarks: + stage: test + extends: + - .docker-env + - .common-refs + - .run-immediately + - .pipeline-stopper-artifacts + variables: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. 
+ RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - time cargo nextest run --features runtime-benchmarks benchmark --locked --cargo-profile testnet # these ones can be really slow so it's better to run them separately test-linux-stable-slow: From 01cdae878d2d6f71395abd6e4014136a9147bea4 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 4 Sep 2023 18:27:53 +0300 Subject: [PATCH 06/28] Extract block announce validation from `ChainSync` (#1170) --- substrate/client/network/common/src/sync.rs | 68 +-- .../sync/src/block_announce_validator.rs | 405 +++++++++++++++ substrate/client/network/sync/src/engine.rs | 153 +++--- .../client/network/sync/src/futures_stream.rs | 134 +++++ substrate/client/network/sync/src/lib.rs | 488 ++++-------------- substrate/client/network/sync/src/mock.rs | 15 +- substrate/client/utils/src/mpsc.rs | 10 + 7 files changed, 727 insertions(+), 546 deletions(-) create mode 100644 substrate/client/network/sync/src/block_announce_validator.rs create mode 100644 substrate/client/network/sync/src/futures_stream.rs diff --git a/substrate/client/network/common/src/sync.rs b/substrate/client/network/common/src/sync.rs index b142925aeb10..461c4ae411d6 100644 --- a/substrate/client/network/common/src/sync.rs +++ b/substrate/client/network/common/src/sync.rs @@ -22,12 +22,12 @@ pub mod message; pub mod metrics; pub mod warp; -use crate::{role::Roles, types::ReputationChange}; +use crate::{role::Roles, sync::message::BlockAnnounce, types::ReputationChange}; use futures::Stream; use libp2p_identity::PeerId; -use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}; +use message::{BlockData, BlockRequest, BlockResponse}; use sc_consensus::{import_queue::RuntimeOrigin, IncomingBlock}; use sp_consensus::BlockOrigin; use sp_runtime::{ @@ -157,38 +157,6 @@ pub enum ImportResult { JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), } -/// Value polled from `ChainSync` -#[derive(Debug)] -pub enum PollResult { - Import(ImportResult), - Announce(PollBlockAnnounceValidation), -} - -/// Result of [`ChainSync::poll_block_announce_validation`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum PollBlockAnnounceValidation { - /// The announcement failed at validation. - /// - /// The peer reputation should be decreased. - Failure { - /// Who sent the processed block announcement? - who: PeerId, - /// Should the peer be disconnected? - disconnect: bool, - }, - /// The announcement does not require further handling. - Nothing { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce, - }, - /// The block announcement should be skipped. - Skip, -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { @@ -408,29 +376,14 @@ pub trait ChainSync: Send { /// Notify about finalization of the given block. fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - /// Push a block announce validation. - /// - /// It is required that [`ChainSync::poll_block_announce_validation`] is called - /// to check for finished block announce validations. - fn push_block_announce_validation( + /// Notify about pre-validated block announcement. + fn on_validated_block_announce( &mut self, - who: PeerId, - hash: Block::Hash, - announce: BlockAnnounce, is_best: bool, + who: PeerId, + announce: &BlockAnnounce, ); - /// Poll block announce validation. 
- /// - /// Block announce validations can be pushed by using - /// [`ChainSync::push_block_announce_validation`]. - /// - /// This should be polled until it returns [`Poll::Pending`]. - fn poll_block_announce_validation( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> Poll>; - /// Call when a peer has disconnected. /// Canceled obsolete block request may result in some blocks being ready for /// import, so this functions checks for such blocks and returns them. @@ -447,14 +400,7 @@ pub trait ChainSync: Send { ) -> Result>, String>; /// Advance the state of `ChainSync` - /// - /// Internally calls [`ChainSync::poll_block_announce_validation()`] and - /// this function should be polled until it returns [`Poll::Pending`] to - /// consume all pending events. - fn poll( - &mut self, - cx: &mut std::task::Context, - ) -> Poll>; + fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()>; /// Send block request to peer fn send_block_request(&mut self, who: PeerId, request: BlockRequest); diff --git a/substrate/client/network/sync/src/block_announce_validator.rs b/substrate/client/network/sync/src/block_announce_validator.rs new file mode 100644 index 000000000000..f083f9e29e44 --- /dev/null +++ b/substrate/client/network/sync/src/block_announce_validator.rs @@ -0,0 +1,405 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! `BlockAnnounceValidator` is responsible for async validation of block announcements. + +use crate::futures_stream::FuturesStream; +use futures::{Future, FutureExt, Stream, StreamExt}; +use libp2p::PeerId; +use log::{debug, error, trace, warn}; +use sc_network_common::sync::message::BlockAnnounce; +use sp_consensus::block_validation::Validation; +use sp_runtime::traits::{Block as BlockT, Header, Zero}; +use std::{ + collections::{hash_map::Entry, HashMap}, + default::Default, + pin::Pin, + task::{Context, Poll}, +}; + +/// Log target for this file. +const LOG_TARGET: &str = "sync"; + +/// Maximum number of concurrent block announce validations. +/// +/// If the queue reaches the maximum, we drop any new block +/// announcements. +const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS: usize = 256; + +/// Maximum number of concurrent block announce validations per peer. +/// +/// See [`MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS`] for more information. +const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; + +/// Item that yields [`Stream`] implementation of [`BlockAnnounceValidator`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum BlockAnnounceValidationResult { + /// The announcement failed at validation. + /// + /// The peer reputation should be decreased. + Failure { + /// The id of the peer that send us the announcement. + peer_id: PeerId, + /// Should the peer be disconnected? 
+ disconnect: bool, + }, + /// The announcement was validated successfully and should be passed to [`crate::ChainSync`]. + Process { + /// The id of the peer that send us the announcement. + peer_id: PeerId, + /// Was this their new best block? + is_new_best: bool, + /// The announcement. + announce: BlockAnnounce, + }, + /// The block announcement should be skipped. + Skip { + /// The id of the peer that send us the announcement. + peer_id: PeerId, + }, +} + +impl BlockAnnounceValidationResult { + fn peer_id(&self) -> &PeerId { + match self { + BlockAnnounceValidationResult::Failure { peer_id, .. } | + BlockAnnounceValidationResult::Process { peer_id, .. } | + BlockAnnounceValidationResult::Skip { peer_id } => peer_id, + } + } +} + +/// Result of [`BlockAnnounceValidator::allocate_slot_for_block_announce_validation`]. +enum AllocateSlotForBlockAnnounceValidation { + /// Success, there is a slot for the block announce validation. + Allocated, + /// We reached the total maximum number of validation slots. + TotalMaximumSlotsReached, + /// We reached the maximum number of validation slots for the given peer. + MaximumPeerSlotsReached, +} + +pub(crate) struct BlockAnnounceValidator { + /// A type to check incoming block announcements. + validator: Box + Send>, + /// All block announcements that are currently being validated. + validations: FuturesStream< + Pin> + Send>>, + >, + /// Number of concurrent block announce validations per peer. + validations_per_peer: HashMap, +} + +impl BlockAnnounceValidator { + pub(crate) fn new( + validator: Box + Send>, + ) -> Self { + Self { + validator, + validations: Default::default(), + validations_per_peer: Default::default(), + } + } + + /// Push a block announce validation. + pub(crate) fn push_block_announce_validation( + &mut self, + peer_id: PeerId, + hash: B::Hash, + announce: BlockAnnounce, + is_best: bool, + ) { + let header = &announce.header; + let number = *header.number(); + debug!( + target: LOG_TARGET, + "Pre-validating received block announcement {:?} with number {:?} from {}", + hash, + number, + peer_id, + ); + + if number.is_zero() { + warn!( + target: LOG_TARGET, + "💔 Ignored genesis block (#0) announcement from {}: {}", + peer_id, + hash, + ); + return + } + + // Try to allocate a slot for this block announce validation. + match self.allocate_slot_for_block_announce_validation(&peer_id) { + AllocateSlotForBlockAnnounceValidation::Allocated => {}, + AllocateSlotForBlockAnnounceValidation::TotalMaximumSlotsReached => { + warn!( + target: LOG_TARGET, + "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", + number, + hash, + peer_id, + ); + return + }, + AllocateSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { + warn!( + target: LOG_TARGET, + "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots for this peer are occupied.", + number, + hash, + peer_id, + ); + return + }, + } + + // Let external validator check the block announcement. + let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); + let future = self.validator.validate(header, assoc_data); + + self.validations.push( + async move { + match future.await { + Ok(Validation::Success { is_new_best }) => { + let is_new_best = is_new_best || is_best; + + trace!( + target: LOG_TARGET, + "Block announcement validated successfully: from {}: {:?}. 
Local best: {}.", + peer_id, + announce.summary(), + is_new_best, + ); + + BlockAnnounceValidationResult::Process { is_new_best, announce, peer_id } + }, + Ok(Validation::Failure { disconnect }) => { + debug!( + target: LOG_TARGET, + "Block announcement validation failed: from {}, block {:?}. Disconnect: {}.", + peer_id, + hash, + disconnect, + ); + + BlockAnnounceValidationResult::Failure { peer_id, disconnect } + }, + Err(e) => { + debug!( + target: LOG_TARGET, + "💔 Ignoring block announcement validation from {} of block {:?} due to internal error: {}.", + peer_id, + hash, + e, + ); + + BlockAnnounceValidationResult::Skip { peer_id } + }, + } + } + .boxed(), + ); + } + + /// Checks if there is a slot for a block announce validation. + /// + /// The total number and the number per peer of concurrent block announce validations + /// is capped. + /// + /// Returns [`AllocateSlotForBlockAnnounceValidation`] to inform about the result. + /// + /// # Note + /// + /// It is *required* to call [`Self::deallocate_slot_for_block_announce_validation`] when the + /// validation is finished to clear the slot. + fn allocate_slot_for_block_announce_validation( + &mut self, + peer_id: &PeerId, + ) -> AllocateSlotForBlockAnnounceValidation { + if self.validations.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { + return AllocateSlotForBlockAnnounceValidation::TotalMaximumSlotsReached + } + + match self.validations_per_peer.entry(*peer_id) { + Entry::Vacant(entry) => { + entry.insert(1); + AllocateSlotForBlockAnnounceValidation::Allocated + }, + Entry::Occupied(mut entry) => { + if *entry.get() < MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER { + *entry.get_mut() += 1; + AllocateSlotForBlockAnnounceValidation::Allocated + } else { + AllocateSlotForBlockAnnounceValidation::MaximumPeerSlotsReached + } + }, + } + } + + /// Should be called when a block announce validation is finished, to update the slots + /// of the peer that send the block announce. + fn deallocate_slot_for_block_announce_validation(&mut self, peer_id: &PeerId) { + match self.validations_per_peer.entry(*peer_id) { + Entry::Vacant(_) => { + error!( + target: LOG_TARGET, + "💔 Block announcement validation from peer {} finished for a slot that was not allocated!", + peer_id, + ); + }, + Entry::Occupied(mut entry) => match entry.get().checked_sub(1) { + Some(value) => + if value == 0 { + entry.remove(); + } else { + *entry.get_mut() = value; + }, + None => { + entry.remove(); + + error!( + target: LOG_TARGET, + "Invalid (zero) block announce validation slot counter for peer {peer_id}.", + ); + debug_assert!( + false, + "Invalid (zero) block announce validation slot counter for peer {peer_id}.", + ); + }, + }, + } + } +} + +impl Stream for BlockAnnounceValidator { + type Item = BlockAnnounceValidationResult; + + /// Poll for finished block announce validations. The stream never terminates. 
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let validation = futures::ready!(self.validations.poll_next_unpin(cx)) + .expect("`FuturesStream` never terminates; qed"); + self.deallocate_slot_for_block_announce_validation(validation.peer_id()); + + Poll::Ready(Some(validation)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::block_announce_validator::AllocateSlotForBlockAnnounceValidation; + use libp2p::PeerId; + use sp_consensus::block_validation::DefaultBlockAnnounceValidator; + use substrate_test_runtime_client::runtime::Block; + + #[test] + fn allocate_one_validation_slot() { + let mut validator = + BlockAnnounceValidator::::new(Box::new(DefaultBlockAnnounceValidator {})); + let peer_id = PeerId::random(); + + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::Allocated, + )); + } + + #[test] + fn allocate_validation_slots_for_two_peers() { + let mut validator = + BlockAnnounceValidator::::new(Box::new(DefaultBlockAnnounceValidator {})); + let peer_id_1 = PeerId::random(); + let peer_id_2 = PeerId::random(); + + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id_1), + AllocateSlotForBlockAnnounceValidation::Allocated, + )); + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id_2), + AllocateSlotForBlockAnnounceValidation::Allocated, + )); + } + + #[test] + fn maximum_validation_slots_per_peer() { + let mut validator = + BlockAnnounceValidator::::new(Box::new(DefaultBlockAnnounceValidator {})); + let peer_id = PeerId::random(); + + for _ in 0..MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER { + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::Allocated, + )); + } + + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::MaximumPeerSlotsReached, + )); + } + + #[test] + fn validation_slots_per_peer_deallocated() { + let mut validator = + BlockAnnounceValidator::::new(Box::new(DefaultBlockAnnounceValidator {})); + let peer_id = PeerId::random(); + + for _ in 0..MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER { + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::Allocated, + )); + } + + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::MaximumPeerSlotsReached, + )); + + validator.deallocate_slot_for_block_announce_validation(&peer_id); + + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::Allocated, + )); + } + + #[test] + fn maximum_validation_slots_for_all_peers() { + let mut validator = + BlockAnnounceValidator::::new(Box::new(DefaultBlockAnnounceValidator {})); + + for _ in 0..MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { + validator.validations.push( + futures::future::ready(BlockAnnounceValidationResult::Skip { + peer_id: PeerId::random(), + }) + .boxed(), + ); + } + + let peer_id = PeerId::random(); + assert!(matches!( + validator.allocate_slot_for_block_announce_validation(&peer_id), + AllocateSlotForBlockAnnounceValidation::TotalMaximumSlotsReached, + )); + } +} diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 65bd56a28958..9b97bf2b7c34 100644 --- 
a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -20,6 +20,9 @@ //! to tip and keep the blockchain up to date with network updates. use crate::{ + block_announce_validator::{ + BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, + }, service::{self, chain_sync::ToServiceCommand}, ChainSync, ClientError, SyncingService, }; @@ -45,7 +48,7 @@ use sc_network_common::{ sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, warp::WarpSyncParams, - BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncEvent, + BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, SyncEvent, }, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -239,6 +242,9 @@ pub struct SyncingEngine { /// Number of inbound peers accepted so far. num_in_peers: usize, + /// Async processor of block announce validations. + block_announce_validator: BlockAnnounceValidatorStream, + /// A cache for the data that was associated to a block announcement. block_announce_data_cache: LruMap>, @@ -352,7 +358,6 @@ where protocol_id, fork_id, roles, - block_announce_validator, max_parallel_downloads, max_blocks_per_request, warp_sync_params, @@ -389,6 +394,9 @@ where peers: HashMap::new(), block_announce_data_cache: LruMap::new(ByLength::new(cache_capacity)), block_announce_protocol_name, + block_announce_validator: BlockAnnounceValidatorStream::new( + block_announce_validator, + ), num_connected: num_connected.clone(), is_major_syncing: is_major_syncing.clone(), service_rx, @@ -453,9 +461,9 @@ where } } - fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.chain_sync.peer_info(who) { - if let Some(ref mut peer) = self.peers.get_mut(who) { + fn update_peer_info(&mut self, peer_id: &PeerId) { + if let Some(info) = self.chain_sync.peer_info(peer_id) { + if let Some(ref mut peer) = self.peers.get_mut(peer_id) { peer.info.best_hash = info.best_hash; peer.info.best_number = info.best_number; } @@ -463,14 +471,16 @@ where } /// Process the result of the block announce validation. - pub fn process_block_announce_validation_result( + fn process_block_announce_validation_result( &mut self, - validation_result: PollBlockAnnounceValidation, + validation_result: BlockAnnounceValidationResult, ) { match validation_result { - PollBlockAnnounceValidation::Skip => {}, - PollBlockAnnounceValidation::Nothing { is_best: _, who, announce } => { - self.update_peer_info(&who); + BlockAnnounceValidationResult::Skip { peer_id: _ } => {}, + BlockAnnounceValidationResult::Process { is_new_best, peer_id, announce } => { + self.chain_sync.on_validated_block_announce(is_new_best, peer_id, &announce); + + self.update_peer_info(&peer_id); if let Some(data) = announce.data { if !data.is_empty() { @@ -478,41 +488,29 @@ where } } }, - PollBlockAnnounceValidation::Failure { who, disconnect } => { + BlockAnnounceValidationResult::Failure { peer_id, disconnect } => { if disconnect { self.network_service - .disconnect_peer(who, self.block_announce_protocol_name.clone()); + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); } - self.network_service.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); + self.network_service.report_peer(peer_id, rep::BAD_BLOCK_ANNOUNCEMENT); }, } } /// Push a block announce validation. - /// - /// It is required that [`ChainSync::poll_block_announce_validation`] is - /// called later to check for finished validations. 
The result of the validation - /// needs to be passed to [`SyncingEngine::process_block_announce_validation_result`] - /// to finish the processing. - /// - /// # Note - /// - /// This will internally create a future, but this future will not be registered - /// in the task before being polled once. So, it is required to call - /// [`ChainSync::poll_block_announce_validation`] to ensure that the future is - /// registered properly and will wake up the task when being ready. pub fn push_block_announce_validation( &mut self, - who: PeerId, + peer_id: PeerId, announce: BlockAnnounce, ) { let hash = announce.header.hash(); - let peer = match self.peers.get_mut(&who) { + let peer = match self.peers.get_mut(&peer_id) { Some(p) => p, None => { - log::error!(target: "sync", "Received block announce from disconnected peer {}", who); + log::error!(target: "sync", "Received block announce from disconnected peer {}", peer_id); debug_assert!(false); return }, @@ -525,7 +523,8 @@ where BlockState::Normal => false, }; - self.chain_sync.push_block_announce_validation(who, hash, announce, is_best); + self.block_announce_validator + .push_block_announce_validation(peer_id, hash, announce, is_best); } } @@ -558,10 +557,10 @@ where .or_else(|| self.block_announce_data_cache.get(&hash).cloned()) .unwrap_or_default(); - for (who, ref mut peer) in self.peers.iter_mut() { + for (peer_id, ref mut peer) in self.peers.iter_mut() { let inserted = peer.known_blocks.insert(hash); if inserted { - log::trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + log::trace!(target: "sync", "Announcing block {:?} to {}", hash, peer_id); let message = BlockAnnounce { header: header.clone(), state: if is_best { Some(BlockState::Best) } else { Some(BlockState::Normal) }, @@ -656,14 +655,14 @@ where } } }, - ToServiceCommand::JustificationImported(peer, hash, number, success) => { + ToServiceCommand::JustificationImported(peer_id, hash, number, success) => { self.chain_sync.on_justification_import(hash, number, success); if !success { - log::info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash); + log::info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer_id, hash); self.network_service - .disconnect_peer(peer, self.block_announce_protocol_name.clone()); + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); self.network_service.report_peer( - peer, + peer_id, ReputationChange::new_fatal("Invalid justification"), ); } @@ -698,8 +697,11 @@ where let _ = tx.send(self.chain_sync.num_sync_requests()); }, ToServiceCommand::PeersInfo(tx) => { - let peers_info = - self.peers.iter().map(|(id, peer)| (*id, peer.info.clone())).collect(); + let peers_info = self + .peers + .iter() + .map(|(peer_id, peer)| (*peer_id, peer.info.clone())) + .collect(); let _ = tx.send(peers_info); }, ToServiceCommand::OnBlockFinalized(hash, header) => @@ -742,14 +744,6 @@ where if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { self.last_notification_io = Instant::now(); self.push_block_announce_validation(remote, announce); - - // Make sure that the newly added block announce validation future - // was polled once to be registered in the task. 
- if let Poll::Ready(res) = - self.chain_sync.poll_block_announce_validation(cx) - { - self.process_block_announce_validation_result(res) - } } else { log::warn!(target: "sub-libp2p", "Failed to decode block announce"); } @@ -770,10 +764,14 @@ where } } - // poll `ChainSync` last because of a block announcement was received through the - // event stream between `SyncingEngine` and `Protocol` and the validation finished - // right after it as queued, the resulting block request (if any) can be sent right away. - while let Poll::Ready(result) = self.chain_sync.poll(cx) { + // Drive `ChainSync`. + while let Poll::Ready(()) = self.chain_sync.poll(cx) {} + + // Poll block announce validations last, because if a block announcement was received + // through the event stream between `SyncingEngine` and `Protocol` and the validation + // finished right after it is queued, the resulting block request (if any) can be sent + // right away. + while let Poll::Ready(Some(result)) = self.block_announce_validator.poll_next_unpin(cx) { self.process_block_announce_validation_result(result); } @@ -783,15 +781,15 @@ where /// Called by peer when it is disconnecting. /// /// Returns a result if the handshake of this peer was indeed accepted. - pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { - if let Some(info) = self.peers.remove(&peer) { - if self.important_peers.contains(&peer) { - log::warn!(target: "sync", "Reserved peer {} disconnected", peer); + pub fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> { + if let Some(info) = self.peers.remove(&peer_id) { + if self.important_peers.contains(&peer_id) { + log::warn!(target: "sync", "Reserved peer {} disconnected", peer_id); } else { - log::debug!(target: "sync", "{} disconnected", peer); + log::debug!(target: "sync", "{} disconnected", peer_id); } - if !self.default_peers_set_no_slot_connected_peers.remove(&peer) && + if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && info.inbound && info.info.roles.is_full() { match self.num_in_peers.checked_sub(1) { @@ -808,9 +806,10 @@ where } } - self.chain_sync.peer_disconnected(&peer); - self.event_streams - .retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer)).is_ok()); + self.chain_sync.peer_disconnected(&peer_id); + self.event_streams.retain(|stream| { + stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok() + }); Ok(()) } else { Err(()) @@ -824,35 +823,35 @@ where /// from. 
pub fn on_sync_peer_connected( &mut self, - who: PeerId, + peer_id: PeerId, status: &BlockAnnouncesHandshake, sink: NotificationsSink, inbound: bool, ) -> Result<(), ()> { - log::trace!(target: "sync", "New peer {} {:?}", who, status); + log::trace!(target: "sync", "New peer {} {:?}", peer_id, status); - if self.peers.contains_key(&who) { - log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); + if self.peers.contains_key(&peer_id) { + log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", peer_id); debug_assert!(false); return Err(()) } if status.genesis_hash != self.genesis_hash { - self.network_service.report_peer(who, rep::GENESIS_MISMATCH); + self.network_service.report_peer(peer_id, rep::GENESIS_MISMATCH); - if self.important_peers.contains(&who) { + if self.important_peers.contains(&peer_id) { log::error!( target: "sync", "Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})", - who, + peer_id, self.genesis_hash, status.genesis_hash, ); - } else if self.boot_node_ids.contains(&who) { + } else if self.boot_node_ids.contains(&peer_id) { log::error!( target: "sync", "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", - who, + peer_id, self.genesis_hash, status.genesis_hash, ); @@ -867,7 +866,7 @@ where return Err(()) } - let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); + let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id); let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; // make sure to accept no more than `--in-peers` many full nodes @@ -875,7 +874,7 @@ where status.roles.is_full() && inbound && self.num_in_peers == self.max_in_peers { - log::debug!(target: "sync", "All inbound slots have been consumed, rejecting {who}"); + log::debug!(target: "sync", "All inbound slots have been consumed, rejecting {peer_id}"); return Err(()) } @@ -885,7 +884,7 @@ where self.default_peers_set_no_slot_connected_peers.len() + this_peer_reserved_slot { - log::debug!(target: "sync", "Too many full nodes, rejecting {}", who); + log::debug!(target: "sync", "Too many full nodes, rejecting {}", peer_id); return Err(()) } @@ -893,7 +892,7 @@ where (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. 
- log::debug!(target: "sync", "Too many light nodes, rejecting {}", who); + log::debug!(target: "sync", "Too many light nodes, rejecting {}", peer_id); return Err(()) } @@ -911,7 +910,7 @@ where }; let req = if peer.info.roles.is_full() { - match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { + match self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number) { Ok(req) => req, Err(BadPeer(id, repu)) => { self.network_service.report_peer(id, repu); @@ -922,22 +921,22 @@ where None }; - log::debug!(target: "sync", "Connected {}", who); + log::debug!(target: "sync", "Connected {}", peer_id); - self.peers.insert(who, peer); + self.peers.insert(peer_id, peer); if no_slot_peer { - self.default_peers_set_no_slot_connected_peers.insert(who); + self.default_peers_set_no_slot_connected_peers.insert(peer_id); } else if inbound && status.roles.is_full() { self.num_in_peers += 1; } if let Some(req) = req { - self.chain_sync.send_block_request(who, req); + self.chain_sync.send_block_request(peer_id, req); } self.event_streams - .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(who)).is_ok()); + .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(peer_id)).is_ok()); Ok(()) } diff --git a/substrate/client/network/sync/src/futures_stream.rs b/substrate/client/network/sync/src/futures_stream.rs new file mode 100644 index 000000000000..c33d582345b6 --- /dev/null +++ b/substrate/client/network/sync/src/futures_stream.rs @@ -0,0 +1,134 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! A wrapper for [`FuturesUnordered`] that wakes the task up once a new future is pushed +//! for it to be polled automatically. It's [`Stream`] never terminates. + +use futures::{stream::FuturesUnordered, Future, Stream, StreamExt}; +use std::{ + pin::Pin, + task::{Context, Poll, Waker}, +}; + +/// Wrapper around [`FuturesUnordered`] that wakes a task up automatically. +pub struct FuturesStream { + futures: FuturesUnordered, + waker: Option, +} + +/// Surprizingly, `#[derive(Default)]` doesn't work on [`FuturesStream`]. +impl Default for FuturesStream { + fn default() -> FuturesStream { + FuturesStream { futures: Default::default(), waker: None } + } +} + +impl FuturesStream { + /// Push a future for processing. + pub fn push(&mut self, future: F) { + self.futures.push(future); + + if let Some(waker) = self.waker.take() { + waker.wake(); + } + } + + /// The number of futures in the stream. 
+ pub fn len(&self) -> usize { + self.futures.len() + } +} + +impl Stream for FuturesStream { + type Item = ::Output; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Poll::Ready(Some(result)) = self.futures.poll_next_unpin(cx) else { + self.waker = Some(cx.waker().clone()); + + return Poll::Pending + }; + + Poll::Ready(Some(result)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::future::{BoxFuture, FutureExt}; + + /// [`Stream`] implementation for [`FuturesStream`] relies on the undocumented + /// feature that [`FuturesUnordered`] can be polled and repeatedly yield + /// `Poll::Ready(None)` before any futures are added into it. + #[tokio::test] + async fn empty_futures_unordered_can_be_polled() { + let mut unordered = FuturesUnordered::>::default(); + + futures::future::poll_fn(|cx| { + assert_eq!(unordered.poll_next_unpin(cx), Poll::Ready(None)); + assert_eq!(unordered.poll_next_unpin(cx), Poll::Ready(None)); + + Poll::Ready(()) + }) + .await; + } + + /// [`Stream`] implementation for [`FuturesStream`] relies on the undocumented + /// feature that [`FuturesUnordered`] can be polled and repeatedly yield + /// `Poll::Ready(None)` after all the futures in it have resolved. + #[tokio::test] + async fn deplenished_futures_unordered_can_be_polled() { + let mut unordered = FuturesUnordered::>::default(); + + unordered.push(futures::future::ready(()).boxed()); + assert_eq!(unordered.next().await, Some(())); + + futures::future::poll_fn(|cx| { + assert_eq!(unordered.poll_next_unpin(cx), Poll::Ready(None)); + assert_eq!(unordered.poll_next_unpin(cx), Poll::Ready(None)); + + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn empty_futures_stream_yields_pending() { + let mut stream = FuturesStream::>::default(); + + futures::future::poll_fn(|cx| { + assert_eq!(stream.poll_next_unpin(cx), Poll::Pending); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn futures_stream_resolves_futures_and_yields_pending() { + let mut stream = FuturesStream::default(); + stream.push(futures::future::ready(17)); + + futures::future::poll_fn(|cx| { + assert_eq!(stream.poll_next_unpin(cx), Poll::Ready(Some(17))); + assert_eq!(stream.poll_next_unpin(cx), Poll::Pending); + Poll::Ready(()) + }) + .await; + } +} diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 175c1c43f46f..0c2013b14977 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -37,9 +37,7 @@ use crate::{ use codec::{Decode, DecodeAll, Encode}; use extra_requests::ExtraRequests; -use futures::{ - channel::oneshot, stream::FuturesUnordered, task::Poll, Future, FutureExt, StreamExt, -}; +use futures::{channel::oneshot, task::Poll, Future, FutureExt}; use libp2p::{request_response::OutboundFailure, PeerId}; use log::{debug, error, info, trace, warn}; use prost::Message; @@ -66,16 +64,12 @@ use sc_network_common::{ warp::{EncodedProof, WarpProofRequest, WarpSyncParams, WarpSyncPhase, WarpSyncProgress}, BadPeer, ChainSync as ChainSyncT, ImportResult, Metrics, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, - OpaqueStateResponse, PeerInfo, PeerRequest, PollBlockAnnounceValidation, SyncMode, - SyncState, SyncStatus, + OpaqueStateResponse, PeerInfo, PeerRequest, SyncMode, SyncState, SyncStatus, }, }; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; -use 
sp_consensus::{ - block_validation::{BlockAnnounceValidator, Validation}, - BlockOrigin, BlockStatus, -}; +use sp_consensus::{BlockOrigin, BlockStatus}; use sp_runtime::{ traits::{ Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, @@ -85,7 +79,7 @@ use sp_runtime::{ }; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{HashMap, HashSet}, iter, ops::Range, pin::Pin, @@ -94,7 +88,9 @@ use std::{ pub use service::chain_sync::SyncingService; +mod block_announce_validator; mod extra_requests; +mod futures_stream; mod schema; pub mod block_request_handler; @@ -117,17 +113,6 @@ const MAX_DOWNLOAD_AHEAD: u32 = 2048; /// common block of a node. const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; -/// Maximum number of concurrent block announce validations. -/// -/// If the queue reaches the maximum, we drop any new block -/// announcements. -const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS: usize = 256; - -/// Maximum number of concurrent block announce validations per peer. -/// -/// See [`MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS`] for more information. -const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; - /// Pick the state to sync as the latest finalized number minus this. const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; @@ -307,19 +292,12 @@ pub struct ChainSync { fork_targets: HashMap>, /// A set of peers for which there might be potential block requests allowed_requests: AllowedRequests, - /// A type to check incoming block announcements. - block_announce_validator: Box + Send>, /// Maximum number of peers to ask the same blocks in parallel. max_parallel_downloads: u32, /// Maximum blocks per request. max_blocks_per_request: u32, /// Total number of downloaded blocks. downloaded_blocks: usize, - /// All block announcement that are currently being validated. - block_announce_validation: - FuturesUnordered> + Send>>>, - /// Stats per peer about the number of concurrent block announce validations. - block_announce_validation_per_peer_stats: HashMap, /// State sync in progress, if any. state_sync: Option>, /// Warp sync in progress, if any. @@ -424,51 +402,6 @@ impl PeerSyncState { } } -/// Result of [`ChainSync::block_announce_validation`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum PreValidateBlockAnnounce { - /// The announcement failed at validation. - /// - /// The peer reputation should be decreased. - Failure { - /// Who sent the processed block announcement? - who: PeerId, - /// Should the peer be disconnected? - disconnect: bool, - }, - /// The pre-validation was sucessful and the announcement should be - /// further processed. - Process { - /// Is this the new best block of the peer? - is_new_best: bool, - /// The id of the peer that send us the announcement. - who: PeerId, - /// The announcement. - announce: BlockAnnounce, - }, - /// The announcement validation returned an error. - /// - /// An error means that *this* node failed to validate it because some internal error happened. - /// If the block announcement was invalid, [`Self::Failure`] is the correct variant to express - /// this. - Error { who: PeerId }, - /// The block announcement should be skipped. - /// - /// This should *only* be returned when there wasn't a slot registered - /// for this block announcement validation. - Skip, -} - -/// Result of [`ChainSync::has_slot_for_block_announce_validation`]. -enum HasSlotForBlockAnnounceValidation { - /// Yes, there is a slot for the block announce validation. 
- Yes, - /// We reached the total maximum number of validation slots. - TotalMaximumSlotsReached, - /// We reached the maximum number of validation slots for the given peer. - MaximumPeerSlotsReached, -} - impl ChainSyncT for ChainSync where B: BlockT, @@ -692,7 +625,7 @@ where self.extra_justifications.reset(); } - // The implementation is similar to on_block_announce with unknown parent hash. + // The implementation is similar to `on_validated_block_announce` with unknown parent hash. fn set_sync_fork_request( &mut self, mut peers: Vec, @@ -1107,119 +1040,88 @@ where } } - fn push_block_announce_validation( + fn on_validated_block_announce( &mut self, - who: PeerId, - hash: B::Hash, - announce: BlockAnnounce, is_best: bool, + who: PeerId, + announce: &BlockAnnounce, ) { - let header = &announce.header; - let number = *header.number(); - debug!( - target: "sync", - "Pre-validating received block announcement {:?} with number {:?} from {}", - hash, - number, - who, - ); + let number = *announce.header.number(); + let hash = announce.header.hash(); + let parent_status = + self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; - if number.is_zero() { - self.block_announce_validation.push( - async move { - warn!( - target: "sync", - "💔 Ignored genesis block (#0) announcement from {}: {}", - who, - hash, - ); - PreValidateBlockAnnounce::Skip - } - .boxed(), - ); + let known = self.is_known(&hash); + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called `on_validated_block_announce` with a bad peer ID"); + return + }; + + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: "sync", "Peer {} is in the ancestor search state.", who); return } - // Check if there is a slot for this block announce validation. - match self.has_slot_for_block_announce_validation(&who) { - HasSlotForBlockAnnounceValidation::Yes => {}, - HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached => { - self.block_announce_validation.push( - async move { - warn!( - target: "sync", - "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", - number, - hash, - who, - ); - PreValidateBlockAnnounce::Skip - } - .boxed(), - ); - return - }, - HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { - self.block_announce_validation.push(async move { - warn!( - target: "sync", - "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots for this peer are occupied.", - number, - hash, - who, - ); - PreValidateBlockAnnounce::Skip - }.boxed()); - return - }, + if is_best { + // update their best block + peer.best_number = number; + peer.best_hash = hash; } - // Let external validator check the block announcement. - let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); - let future = self.block_announce_validator.validate(header, assoc_data); + // If the announced block is the best they have and is not ahead of us, our common number + // is either one further ahead or it's the one they just announced, if we know about it. 
+ if is_best { + if known && self.best_queued_number >= number { + self.update_peer_common_number(&who, number); + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number + { + self.update_peer_common_number(&who, number.saturating_sub(One::one())); + } + } + self.allowed_requests.add(&who); - self.block_announce_validation.push( - async move { - match future.await { - Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { - is_new_best: is_new_best || is_best, - announce, - who, - }, - Ok(Validation::Failure { disconnect }) => { - debug!( - target: "sync", - "Block announcement validation of block {:?} from {} failed", - hash, - who, - ); - PreValidateBlockAnnounce::Failure { who, disconnect } - }, - Err(e) => { - debug!( - target: "sync", - "💔 Block announcement validation of block {:?} errored: {}", - hash, - e, - ); - PreValidateBlockAnnounce::Error { who } - }, - } + // known block case + if known || self.is_already_downloading(&hash) { + trace!(target: "sync", "Known block announce from {}: {}", who, hash); + if let Some(target) = self.fork_targets.get_mut(&hash) { + target.peers.insert(who); } - .boxed(), - ); - } + return + } - fn poll_block_announce_validation( - &mut self, - cx: &mut std::task::Context, - ) -> Poll> { - match self.block_announce_validation.poll_next_unpin(cx) { - Poll::Ready(Some(res)) => { - self.peer_block_announce_validation_finished(&res); - Poll::Ready(self.finish_block_announce_validation(res)) - }, - _ => Poll::Pending, + if ancient_parent { + trace!( + target: "sync", + "Ignored ancient block announced from {}: {} {:?}", + who, + hash, + announce.header, + ); + return + } + + if self.status().state == SyncState::Idle { + trace!( + target: "sync", + "Added sync target for block announced from {}: {} {:?}", + who, + hash, + announce.summary(), + ); + self.fork_targets + .entry(hash) + .or_insert_with(|| ForkTarget { + number, + parent_hash: Some(*announce.header.parent_hash()), + peers: Default::default(), + }) + .peers + .insert(who); } } @@ -1319,10 +1221,7 @@ where .map_err(|error: codec::Error| error.to_string()) } - fn poll( - &mut self, - cx: &mut std::task::Context, - ) -> Poll> { + fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()> { // Should be called before `process_outbound_requests` to ensure // that a potential target block is directly leading to requests. if let Some(warp_sync) = &mut self.warp_sync { @@ -1339,10 +1238,6 @@ where } } - if let Poll::Ready(announce) = self.poll_block_announce_validation(cx) { - return Poll::Ready(announce) - } - Poll::Pending } @@ -1395,7 +1290,6 @@ where protocol_id: ProtocolId, fork_id: &Option, roles: Roles, - block_announce_validator: Box + Send>, max_parallel_downloads: u32, max_blocks_per_request: u32, warp_sync_params: Option>, @@ -1430,12 +1324,9 @@ where queue_blocks: Default::default(), fork_targets: Default::default(), allowed_requests: Default::default(), - block_announce_validator, max_parallel_downloads, max_blocks_per_request, downloaded_blocks: 0, - block_announce_validation: Default::default(), - block_announce_validation_per_peer_stats: Default::default(), state_sync: None, warp_sync: None, import_existing: false, @@ -1586,186 +1477,6 @@ where self.allowed_requests.set_all(); } - /// Checks if there is a slot for a block announce validation. - /// - /// The total number and the number per peer of concurrent block announce validations - /// is capped. 
- /// - /// Returns [`HasSlotForBlockAnnounceValidation`] to inform about the result. - /// - /// # Note - /// - /// It is *required* to call [`Self::peer_block_announce_validation_finished`] when the - /// validation is finished to clear the slot. - fn has_slot_for_block_announce_validation( - &mut self, - peer: &PeerId, - ) -> HasSlotForBlockAnnounceValidation { - if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { - return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached - } - - match self.block_announce_validation_per_peer_stats.entry(*peer) { - Entry::Vacant(entry) => { - entry.insert(1); - HasSlotForBlockAnnounceValidation::Yes - }, - Entry::Occupied(mut entry) => { - if *entry.get() < MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER { - *entry.get_mut() += 1; - HasSlotForBlockAnnounceValidation::Yes - } else { - HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached - } - }, - } - } - - /// Should be called when a block announce validation is finished, to update the slots - /// of the peer that send the block announce. - fn peer_block_announce_validation_finished( - &mut self, - res: &PreValidateBlockAnnounce, - ) { - let peer = match res { - PreValidateBlockAnnounce::Failure { who, .. } | - PreValidateBlockAnnounce::Process { who, .. } | - PreValidateBlockAnnounce::Error { who } => who, - PreValidateBlockAnnounce::Skip => return, - }; - - match self.block_announce_validation_per_peer_stats.entry(*peer) { - Entry::Vacant(_) => { - error!( - target: "sync", - "💔 Block announcement validation from peer {} finished for that no slot was allocated!", - peer, - ); - }, - Entry::Occupied(mut entry) => { - *entry.get_mut() = entry.get().saturating_sub(1); - if *entry.get() == 0 { - entry.remove(); - } - }, - } - } - - /// This will finish processing of the block announcement. - fn finish_block_announce_validation( - &mut self, - pre_validation_result: PreValidateBlockAnnounce, - ) -> PollBlockAnnounceValidation { - let (announce, is_best, who) = match pre_validation_result { - PreValidateBlockAnnounce::Failure { who, disconnect } => { - debug!( - target: "sync", - "Failed announce validation: {:?}, disconnect: {}", - who, - disconnect, - ); - return PollBlockAnnounceValidation::Failure { who, disconnect } - }, - PreValidateBlockAnnounce::Process { announce, is_new_best, who } => - (announce, is_new_best, who), - PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => { - debug!( - target: "sync", - "Ignored announce validation", - ); - return PollBlockAnnounceValidation::Skip - }, - }; - - trace!( - target: "sync", - "Finished block announce validation: from {:?}: {:?}. local_best={}", - who, - announce.summary(), - is_best, - ); - - let number = *announce.header.number(); - let hash = announce.header.hash(); - let parent_status = - self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); - let known_parent = parent_status != BlockStatus::Unknown; - let ancient_parent = parent_status == BlockStatus::InChainPruned; - - let known = self.is_known(&hash); - let peer = if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } - }; - - if let PeerSyncState::AncestorSearch { .. 
} = peer.state { - trace!(target: "sync", "Peer state is ancestor search."); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } - } - - if is_best { - // update their best block - peer.best_number = number; - peer.best_hash = hash; - } - - // If the announced block is the best they have and is not ahead of us, our common number - // is either one further ahead or it's the one they just announced, if we know about it. - if is_best { - if known && self.best_queued_number >= number { - self.update_peer_common_number(&who, number); - } else if announce.header.parent_hash() == &self.best_queued_hash || - known_parent && self.best_queued_number >= number - { - self.update_peer_common_number(&who, number - One::one()); - } - } - self.allowed_requests.add(&who); - - // known block case - if known || self.is_already_downloading(&hash) { - trace!(target: "sync", "Known block announce from {}: {}", who, hash); - if let Some(target) = self.fork_targets.get_mut(&hash) { - target.peers.insert(who); - } - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } - } - - if ancient_parent { - trace!( - target: "sync", - "Ignored ancient block announced from {}: {} {:?}", - who, - hash, - announce.header, - ); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } - } - - if self.status().state == SyncState::Idle { - trace!( - target: "sync", - "Added sync target for block announced from {}: {} {:?}", - who, - hash, - announce.summary(), - ); - self.fork_targets - .entry(hash) - .or_insert_with(|| ForkTarget { - number, - parent_hash: Some(*announce.header.parent_hash()), - peers: Default::default(), - }) - .peers - .insert(who); - } - - PollBlockAnnounceValidation::Nothing { is_best, who, announce } - } - /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. @@ -3162,14 +2873,13 @@ fn validate_blocks( mod test { use super::*; use crate::service::network::NetworkServiceProvider; - use futures::{executor::block_on, future::poll_fn}; + use futures::executor::block_on; use sc_block_builder::BlockBuilderProvider; use sc_network_common::{ role::Role, - sync::message::{BlockData, BlockState, FromBlock}, + sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}, }; use sp_blockchain::HeaderBackend; - use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, @@ -3183,7 +2893,6 @@ mod test { // internally we should process the response as the justification not being available. 
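The fork-target bookkeeping kept in the new code path (and shown once more in the removed `finish_block_announce_validation` above) is the standard map-entry pattern: lazily create a target on the first announce of an unknown block, then record every peer that announced it. A self-contained toy sketch with made-up stand-ins for `ForkTarget` and `PeerId` (both assumptions, not the real types):

```rust
use std::collections::{HashMap, HashSet};

// Made-up stand-ins for the real ForkTarget and PeerId types.
struct ForkTarget {
    number: u32,
    peers: HashSet<&'static str>,
}

fn main() {
    let mut fork_targets: HashMap<&'static str, ForkTarget> = HashMap::new();

    // Two peers announce the same unknown fork block.
    for (hash, number, who) in [("0xabc", 4u32, "peer-1"), ("0xabc", 4, "peer-2")] {
        fork_targets
            .entry(hash)
            // Create the target only on the first announce of this hash...
            .or_insert_with(|| ForkTarget { number, peers: HashSet::new() })
            .peers
            // ...and record every announcing peer against it.
            .insert(who);
    }

    assert_eq!(fork_targets["0xabc"].number, 4);
    assert_eq!(fork_targets["0xabc"].peers.len(), 2);
    println!("fork 0xabc wanted by {} peer(s)", fork_targets["0xabc"].peers.len());
}
```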
let client = Arc::new(TestClientBuilder::new().build()); - let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); @@ -3195,7 +2904,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - block_announce_validator, 1, 64, None, @@ -3262,7 +2970,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 1, 64, None, @@ -3344,23 +3051,16 @@ mod test { /// Send a block annoucnement for the given `header`. fn send_block_announce( header: Header, - peer_id: &PeerId, + peer_id: PeerId, sync: &mut ChainSync, ) { - let block_annnounce = BlockAnnounce { + let announce = BlockAnnounce { header: header.clone(), state: Some(BlockState::Best), data: Some(Vec::new()), }; - sync.push_block_announce_validation(*peer_id, header.hash(), block_annnounce, true); - - // Poll until we have procssed the block announcement - block_on(poll_fn(|cx| loop { - if sync.poll_block_announce_validation(cx).is_pending() { - break Poll::Ready(()) - } - })) + sync.on_validated_block_announce(true, peer_id, &announce); } /// Create a block response from the given `blocks`. @@ -3444,7 +3144,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 5, 64, None, @@ -3491,7 +3190,7 @@ mod test { assert!(sync.block_requests().is_empty()); // Let peer2 announce a fork of block 3 - send_block_announce(block3_fork.header().clone(), &peer_id2, &mut sync); + send_block_announce(block3_fork.header().clone(), peer_id2, &mut sync); // Import and tell sync that we now have the fork. block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); @@ -3500,13 +3199,13 @@ mod test { let block4 = build_block_at(block3_fork.hash(), false); // Let peer2 announce block 4 and check that sync wants to get the block. - send_block_announce(block4.header().clone(), &peer_id2, &mut sync); + send_block_announce(block4.header().clone(), peer_id2, &mut sync); let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); // Peer1 announces the same block, but as the common block is still `1`, sync will request // block 2 again. 
- send_block_announce(block4.header().clone(), &peer_id1, &mut sync); + send_block_announce(block4.header().clone(), peer_id1, &mut sync); let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); @@ -3571,7 +3270,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 5, 64, None, @@ -3647,7 +3345,7 @@ mod test { sync.queue_blocks.clear(); // Let peer2 announce that it finished syncing - send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); + send_block_announce(best_block.header().clone(), peer_id2, &mut sync); let (peer1_req, peer2_req) = sync.block_requests().into_iter().fold((None, None), |res, req| { @@ -3729,7 +3427,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 5, 64, None, @@ -3754,7 +3451,7 @@ mod test { sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); - send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); + send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); @@ -3872,7 +3569,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 5, 64, None, @@ -3897,7 +3593,7 @@ mod test { sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); - send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); + send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); @@ -4017,7 +3713,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 1, 64, None, @@ -4039,7 +3734,7 @@ mod test { // Create a "new" header and announce it let mut header = blocks[0].header().clone(); header.number = 4; - send_block_announce(header, &peer_id1, &mut sync); + send_block_announce(header, peer_id1, &mut sync); assert!(sync.fork_targets.len() == 1); sync.peer_disconnected(&peer_id1); @@ -4063,7 +3758,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), 1, 64, None, @@ -4107,7 +3801,6 @@ mod test { #[test] fn sync_restart_removes_block_but_not_justification_requests() { let mut client = Arc::new(TestClientBuilder::new().build()); - let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); @@ -4117,7 +3810,6 @@ mod test { ProtocolId::from("test-protocol-name"), &Some(String::from("test-fork-id")), Roles::from(&Role::Full), - block_announce_validator, 1, 64, None, diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs index 838c6cf7667a..d37095c17d2c 100644 --- a/substrate/client/network/sync/src/mock.rs +++ b/substrate/client/network/sync/src/mock.rs @@ -24,7 +24,7 @@ use libp2p::PeerId; use 
sc_network_common::sync::{ message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, - OpaqueBlockResponse, PeerInfo, PollBlockAnnounceValidation, SyncStatus, + OpaqueBlockResponse, PeerInfo, SyncStatus, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -71,17 +71,12 @@ mockall::mock! { success: bool, ); fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); - fn push_block_announce_validation( + fn on_validated_block_announce( &mut self, - who: PeerId, - hash: Block::Hash, - announce: BlockAnnounce, is_best: bool, + who: PeerId, + announce: &BlockAnnounce, ); - fn poll_block_announce_validation<'a>( - &mut self, - cx: &mut std::task::Context<'a>, - ) -> Poll>; fn peer_disconnected(&mut self, who: &PeerId); fn metrics(&self) -> Metrics; fn block_response_into_blocks( @@ -92,7 +87,7 @@ mockall::mock! { fn poll<'a>( &mut self, cx: &mut std::task::Context<'a>, - ) -> Poll>; + ) -> Poll<()>; fn send_block_request( &mut self, who: PeerId, diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index 36e44be5e295..039e03f9e618 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -123,6 +123,11 @@ impl TracingUnboundedSender { s }) } + + /// The number of elements in the channel (proxy function to [`async_channel::Sender`]). + pub fn len(&self) -> usize { + self.inner.len() + } } impl TracingUnboundedReceiver { @@ -139,6 +144,11 @@ impl TracingUnboundedReceiver { s }) } + + /// The number of elements in the channel (proxy function to [`async_channel::Receiver`]). + pub fn len(&self) -> usize { + self.inner.len() + } } impl Drop for TracingUnboundedReceiver { From 076529ea550dc7362b695dbc20d04f3c718da9d4 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Mon, 4 Sep 2023 21:33:20 +0200 Subject: [PATCH 07/28] Contracts: Update read_sandbox (#1390) * Update runtime.rs * Fix * Revert "Update runtime.rs" This reverts commit 808f026a835b1f2707f640799454a50fd3a8be7b. 
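The substantive change in this patch is in the `wasm/runtime.rs` hunk below: reading a value out of sandbox memory now uses `decode_with_depth_limit` instead of `decode_all_with_depth_limit`, since the slice is taken with `D::max_encoded_len()` bytes and the actual encoding may be shorter. A hedged sketch of the behavioural difference using `parity-scale-codec` directly (the dependency rename and the `MAX_DECODE_NESTING` value are assumptions for illustration only):

```rust
// Assumed dependency for this sketch:
// codec = { package = "parity-scale-codec", version = "3" }
use codec::{DecodeLimit, Encode};

// Illustrative nesting limit, mirroring the pallet constant by assumption.
const MAX_DECODE_NESTING: u32 = 256;

fn main() {
    // Encode a value, then pad the buffer as if it were a max_encoded_len()-sized slice.
    let mut buf = 42u32.encode();
    buf.extend_from_slice(&[0u8; 4]);

    // decode_all_with_depth_limit insists the whole input is consumed,
    // so the padding makes it fail...
    assert!(u32::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]).is_err());

    // ...while decode_with_depth_limit stops after one complete value
    // and tolerates the trailing bytes.
    let value = u32::decode_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]).unwrap();
    assert_eq!(value, 42);
    println!("decoded {value} from a padded buffer");
}
```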
* nit mono-repo fixes --- substrate/frame/contracts/build.rs | 2 +- substrate/frame/contracts/src/tests.rs | 4 +++- substrate/frame/contracts/src/wasm/runtime.rs | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/substrate/frame/contracts/build.rs b/substrate/frame/contracts/build.rs index 7817ace9c98e..42bc45d563d3 100644 --- a/substrate/frame/contracts/build.rs +++ b/substrate/frame/contracts/build.rs @@ -53,7 +53,7 @@ fn get_latest_version() -> u16 { fn main() -> Result<(), Box> { let out_dir = std::env::var("OUT_DIR")?; let path = std::path::Path::new(&out_dir).join("migration_codegen.rs"); - let mut f = std::fs::File::create(&path)?; + let mut f = std::fs::File::create(path)?; let version = get_latest_version(); write!( f, diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 0c0a2f7f9327..8d6c5c5ac728 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -564,7 +564,9 @@ where { let fixture_path = [ // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder - std::env::var("CARGO_MANIFEST_DIR").as_deref().unwrap_or("frame/contracts"), + std::env::var("CARGO_MANIFEST_DIR") + .as_deref() + .unwrap_or("substrate/frame/contracts"), "/fixtures/", fixture_name, ".wat", diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 4bc00388f726..4fd52b471a0c 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -608,7 +608,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let mut bound_checked = memory .get(ptr..ptr + D::max_encoded_len() as usize) .ok_or_else(|| Error::::OutOfBounds)?; - let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut bound_checked) + let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut bound_checked) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } From 7217a5da65142d104903eacffcdfa54b093cc645 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Tue, 5 Sep 2023 12:31:57 +1000 Subject: [PATCH 08/28] rust docs: add simple analytics (#1377) * inject simple analytics * comments * fix chown * comments * doc features --- .gitlab/pipeline/build.yml | 21 ++++++++++++++++++++- substrate/.maintain/rustdocs-release.sh | 3 ++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 636f7e47afad..20fed5df3dfd 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -114,11 +114,30 @@ build-rustdoc: script: # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` and `--all-features` # FIXME: return to stable when https://github.com/rust-lang/rust/issues/96937 gets into stable - - time cargo doc --workspace --no-deps + - time cargo doc --features try-runtime,experimental --workspace --no-deps - rm -f ./target/doc/.lock - mv ./target/doc ./crate-docs # FIXME: remove me after CI image gets nonroot - chown -R nonroot:nonroot ./crate-docs + # Inject Simple Analytics (https://www.simpleanalytics.com/) privacy preserving tracker into + # all .html files + - | + inject_simple_analytics() { + local path="$1" + local script_content="" + + # Function that inject script into the head of an html file using sed. 
+ process_file() { + local file="$1" + echo "Adding Simple Analytics script to $file" + sed -i "s||$script_content|" "$file" + } + export -f process_file + + # Modify .html files in parallel using xargs, otherwise it can take a long time. + find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {} + } + inject_simple_analytics "./crate-docs" - echo "" > ./crate-docs/index.html build-implementers-guide: diff --git a/substrate/.maintain/rustdocs-release.sh b/substrate/.maintain/rustdocs-release.sh index 2a1e141e63ad..091f9289e4e3 100755 --- a/substrate/.maintain/rustdocs-release.sh +++ b/substrate/.maintain/rustdocs-release.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash # set -x -# This script manages the deployment of Substrate rustdocs to https://paritytech.github.io/substrate/. +# This script used to manage the deployment of Substrate rustdocs to https://paritytech.github.io/substrate/. +# It is no longer used anywhere, and only here for historical/demonstration purposes. # - With `deploy` sub-command, it will checkout the passed-in branch/tag ref, build the rustdocs # locally (this takes some time), update the `index.html` index page, and push it to remote # `gh-pages` branch. So users running this command need to have write access to the remote From a14691804dce0815defa955450fecf1d9f2ca1c4 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Tue, 5 Sep 2023 08:13:19 +0200 Subject: [PATCH 09/28] Move Relay-Specific Shared Code to One Place (#1193) * add common libs * asset hubs * add westend * bridge hubs * collectives * contracts * emulated tests * parachain bin * delete collectives constants and update docs * integration tests should have apache license (some missing, some needed changing) * propagate features * fmt --- Cargo.lock | 6 + cumulus/parachains/common/Cargo.toml | 11 ++ .../src/constants.rs => common/src/kusama.rs} | 31 ++--- cumulus/parachains/common/src/lib.rs | 4 + .../constants.rs => common/src/polkadot.rs} | 34 ++--- .../src/constants.rs => common/src/rococo.rs} | 4 +- .../constants.rs => common/src/westend.rs} | 4 +- .../assets/asset-hub-kusama/Cargo.toml | 1 + .../assets/asset-hub-kusama/src/lib.rs | 25 ++-- .../src/tests/hrmp_channels.rs | 27 ++-- .../assets/asset-hub-kusama/src/tests/mod.rs | 25 ++-- .../src/tests/reserve_transfer.rs | 27 ++-- .../assets/asset-hub-kusama/src/tests/send.rs | 27 ++-- .../src/tests/set_xcm_versions.rs | 25 ++-- .../assets/asset-hub-kusama/src/tests/swap.rs | 37 +++--- .../asset-hub-kusama/src/tests/teleport.rs | 25 ++-- .../assets/asset-hub-polkadot/Cargo.toml | 1 + .../assets/asset-hub-polkadot/src/lib.rs | 25 ++-- .../src/tests/hrmp_channels.rs | 27 ++-- .../asset-hub-polkadot/src/tests/mod.rs | 25 ++-- .../src/tests/reserve_transfer.rs | 27 ++-- .../asset-hub-polkadot/src/tests/send.rs | 27 ++-- .../src/tests/set_xcm_versions.rs | 25 ++-- .../asset-hub-polkadot/src/tests/teleport.rs | 25 ++-- .../assets/asset-hub-westend/Cargo.toml | 1 + .../assets/asset-hub-westend/src/lib.rs | 25 ++-- .../assets/asset-hub-westend/src/tests/mod.rs | 25 ++-- .../src/tests/reserve_transfer.rs | 27 ++-- .../asset-hub-westend/src/tests/send.rs | 27 ++-- .../src/tests/set_xcm_versions.rs | 25 ++-- .../asset-hub-westend/src/tests/swap.rs | 35 +++--- .../asset-hub-westend/src/tests/teleport.rs | 25 ++-- .../bridges/bridge-hub-rococo/Cargo.toml | 1 + .../bridges/bridge-hub-rococo/src/lib.rs | 25 ++-- .../bridge-hub-rococo/src/tests/example.rs | 25 ++-- 
.../bridge-hub-rococo/src/tests/mod.rs | 25 ++-- .../collectives-polkadot/Cargo.toml | 1 + .../collectives-polkadot/src/lib.rs | 25 ++-- .../src/tests/fellowship.rs | 25 ++-- .../collectives-polkadot/src/tests/mod.rs | 25 ++-- .../emulated/common/Cargo.toml | 1 + .../emulated/common/src/constants.rs | 39 +++--- .../emulated/common/src/impls.rs | 27 ++-- .../emulated/common/src/lib.rs | 25 ++-- .../assets/asset-hub-kusama/src/constants.rs | 118 ----------------- .../assets/asset-hub-kusama/src/lib.rs | 11 +- .../assets/asset-hub-kusama/tests/tests.rs | 5 +- .../asset-hub-polkadot/src/constants.rs | 119 ------------------ .../assets/asset-hub-polkadot/src/lib.rs | 6 +- .../assets/asset-hub-polkadot/tests/tests.rs | 10 +- .../assets/asset-hub-westend/src/lib.rs | 8 +- .../assets/asset-hub-westend/tests/tests.rs | 17 +-- .../bridge-hub-kusama/src/constants.rs | 117 ----------------- .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 8 +- .../bridge-hub-kusama/tests/tests.rs | 6 +- .../bridge-hub-polkadot/src/constants.rs | 117 ----------------- .../bridge-hub-polkadot/src/lib.rs | 8 +- .../bridge-hub-polkadot/tests/tests.rs | 6 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 9 +- .../bridge-hub-rococo/tests/tests.rs | 3 +- .../src/fellowship/mod.rs | 5 +- .../collectives-polkadot/src/lib.rs | 10 +- .../contracts-rococo/src/contracts.rs | 6 +- .../contracts/contracts-rococo/src/lib.rs | 9 +- .../src/chain_spec/asset_hubs.rs | 6 +- .../src/chain_spec/bridge_hubs.rs | 6 +- .../src/chain_spec/collectives.rs | 2 +- .../src/chain_spec/contracts.rs | 2 +- 68 files changed, 534 insertions(+), 1009 deletions(-) rename cumulus/parachains/{runtimes/contracts/contracts-rococo/src/constants.rs => common/src/kusama.rs} (95%) rename cumulus/parachains/{runtimes/collectives/collectives-polkadot/src/constants.rs => common/src/polkadot.rs} (94%) rename cumulus/parachains/{runtimes/bridge-hubs/bridge-hub-rococo/src/constants.rs => common/src/rococo.rs} (96%) rename cumulus/parachains/{runtimes/assets/asset-hub-westend/src/constants.rs => common/src/westend.rs} (96%) delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/constants.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/constants.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/constants.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/constants.rs diff --git a/Cargo.lock b/Cargo.lock index 8af7f4f87217..38d0a3388d33 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10978,6 +10978,7 @@ dependencies = [ "cumulus-primitives-utility", "frame-support", "frame-system", + "kusama-runtime-constants", "log", "num-traits", "pallet-asset-tx-payment", @@ -10986,8 +10987,12 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "parity-scale-codec", + "polkadot-core-primitives", "polkadot-primitives", + "polkadot-runtime-constants", + "rococo-runtime-constants", "scale-info", + "smallvec", "sp-consensus-aura", "sp-core", "sp-io", @@ -10997,6 +11002,7 @@ dependencies = [ "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", + "westend-runtime-constants", ] [[package]] diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 0c863b7295b4..18cafde0d303 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -13,6 +13,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive log = { version = "0.4.19", default-features = 
false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } num-traits = { version = "0.2", default-features = false} +smallvec = "1.11.0" # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } @@ -28,6 +29,11 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot +kusama-runtime-constants = { path = "../../../polkadot/runtime/kusama/constants", default-features = false} +polkadot-runtime-constants = { path = "../../../polkadot/runtime/polkadot/constants", default-features = false} +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} @@ -52,18 +58,23 @@ std = [ "cumulus-primitives-utility/std", "frame-support/std", "frame-system/std", + "kusama-runtime-constants/std", "log/std", "pallet-asset-tx-payment/std", "pallet-assets/std", "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", + "polkadot-core-primitives/std", "polkadot-primitives/std", + "polkadot-runtime-constants/std", + "rococo-runtime-constants/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", + "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/constants.rs b/cumulus/parachains/common/src/kusama.rs similarity index 95% rename from cumulus/parachains/runtimes/contracts/contracts-rococo/src/constants.rs rename to cumulus/parachains/common/src/kusama.rs index 9b0fe5182a25..308f7d081ced 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/constants.rs +++ b/cumulus/parachains/common/src/kusama.rs @@ -13,6 +13,19 @@ // See the License for the specific language governing permissions and // limitations under the License. +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} + +/// Constants relating to KSM. pub mod currency { use kusama_runtime_constants as constants; use polkadot_core_primitives::Balance; @@ -31,7 +44,7 @@ pub mod currency { } } -/// Fee-related. +/// Constants related to Kusama fee payment. 
pub mod fee { use frame_support::{ pallet_prelude::Weight, @@ -75,8 +88,8 @@ pub mod fee { impl WeightToFeePolynomial for RefTimeToFee { type Balance = Balance; fn polynomial() -> WeightToFeeCoefficients { - // in Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Rococo Contracts, we map to 1/10 of that, or 1/100 CENT + // In Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. let p = super::currency::CENTS; let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); @@ -107,15 +120,3 @@ pub mod fee { } } } - -/// Consensus-related. -pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; - /// Relay chain slot duration, in milliseconds. - pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; -} diff --git a/cumulus/parachains/common/src/lib.rs b/cumulus/parachains/common/src/lib.rs index 797010d49a07..cb2ac1a1e3e4 100644 --- a/cumulus/parachains/common/src/lib.rs +++ b/cumulus/parachains/common/src/lib.rs @@ -16,6 +16,10 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod impls; +pub mod kusama; +pub mod polkadot; +pub mod rococo; +pub mod westend; pub mod xcm_config; pub use constants::*; pub use opaque::*; diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/constants.rs b/cumulus/parachains/common/src/polkadot.rs similarity index 94% rename from cumulus/parachains/runtimes/collectives/collectives-polkadot/src/constants.rs rename to cumulus/parachains/common/src/polkadot.rs index 46b562ea4de7..52cee939224c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/constants.rs +++ b/cumulus/parachains/common/src/polkadot.rs @@ -13,6 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +/// Universally recognized accounts. pub mod account { use frame_support::PalletId; @@ -28,6 +29,19 @@ pub mod account { pub const REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/refer"); } +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} + +/// Constants relating to DOT. pub mod currency { use polkadot_core_primitives::Balance; use polkadot_runtime_constants as constants; @@ -41,12 +55,12 @@ pub mod currency { pub const MILLICENTS: Balance = constants::currency::MILLICENTS; pub const fn deposit(items: u32, bytes: u32) -> Balance { - // 1/100 of Polkadot. + // 1/100 of Polkadot constants::currency::deposit(items, bytes) / 100 } } -/// Fee-related. +/// Constants related to Polkadot fee payment. 
pub mod fee { use frame_support::{ pallet_prelude::Weight, @@ -90,8 +104,8 @@ pub mod fee { impl WeightToFeePolynomial for RefTimeToFee { type Balance = Balance; fn polynomial() -> WeightToFeeCoefficients { - // in Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in a parachain, we map to 1/10 of that, or 1/100 CENT + // In Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. let p = super::currency::CENTS; let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); @@ -122,15 +136,3 @@ pub mod fee { } } } - -/// Consensus-related. -pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; - /// Relay chain slot duration, in milliseconds. - pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/constants.rs b/cumulus/parachains/common/src/rococo.rs similarity index 96% rename from cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/constants.rs rename to cumulus/parachains/common/src/rococo.rs index 80620feaedc6..6e31def4b55b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/constants.rs +++ b/cumulus/parachains/common/src/rococo.rs @@ -73,8 +73,8 @@ pub mod fee { impl WeightToFeePolynomial for RefTimeToFee { type Balance = Balance; fn polynomial() -> WeightToFeeCoefficients { - // in Rococo, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Bridge Hub, we map to 1/10 of that, or 1/100 CENT + // In Rococo, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. let p = super::currency::CENTS; let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/constants.rs b/cumulus/parachains/common/src/westend.rs similarity index 96% rename from cumulus/parachains/runtimes/assets/asset-hub-westend/src/constants.rs rename to cumulus/parachains/common/src/westend.rs index b2629ef10fcc..9d3e0bd1a0e2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/constants.rs +++ b/cumulus/parachains/common/src/westend.rs @@ -75,8 +75,8 @@ pub mod fee { impl WeightToFeePolynomial for RefTimeToFee { type Balance = Balance; fn polynomial() -> WeightToFeeCoefficients { - // in Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Asset Hub, we map to 1/10 of that, or 1/100 CENT + // In Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. 
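The comment above, repeated across the moved relay-specific fee modules, describes a single linear term with coefficient `p/q`, chosen so that the extrinsic base weight prices at 1/100 CENT on a system parachain. A quick arithmetic sketch with made-up magnitudes (both constants below are illustrative assumptions, not the real runtime values):

```rust
fn main() {
    // Illustrative magnitudes only; the real CENTS and base-weight values live in
    // the relay constants crates and differ per network.
    const CENTS: u128 = 1_000_000_000;
    const EXTRINSIC_BASE_WEIGHT_REF_TIME: u128 = 125_000_000;

    // One linear term: fee(w) ~= w * p / q, with p and q chosen as in the moved modules.
    let p = CENTS;
    let q = 100 * EXTRINSIC_BASE_WEIGHT_REF_TIME;

    // Pricing exactly the base weight lands on 1/100 CENT, as the comment states.
    let base_extrinsic_fee = EXTRINSIC_BASE_WEIGHT_REF_TIME * p / q;
    assert_eq!(base_extrinsic_fee, CENTS / 100);
    println!("base extrinsic prices at {base_extrinsic_fee} plancks, i.e. 1/100 CENT");
}
```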
let p = super::currency::CENTS; let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/Cargo.toml index d45a201a4d05..4f17b1ac55b5 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/Cargo.toml @@ -3,6 +3,7 @@ name = "asset-hub-kusama-integration-tests" version = "1.0.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" description = "Asset Hub Kusama runtime integration tests with xcm-emulator" publish = false diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/lib.rs index 9325ca54b068..ad74aa2301fc 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub use codec::Encode; pub use frame_support::{ diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs index 8647c1aa0081..623b3ff599c8 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/mod.rs index 41d840658193..b3089a3b3826 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. mod hrmp_channels; mod reserve_transfer; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/reserve_transfer.rs index 26b3cdf68b1b..645dca5035b1 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/reserve_transfer.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs index d633c25b7324..5891b694c8e4 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/send.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/set_xcm_versions.rs index 155ada2ec2f3..a7af96096cdd 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/set_xcm_versions.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/set_xcm_versions.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/swap.rs index aad93db9922e..3a67b5435828 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/swap.rs @@ -1,22 +1,21 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
use crate::*; -use asset_hub_kusama_runtime::constants::currency::EXISTENTIAL_DEPOSIT; use frame_support::{instances::Instance2, BoundedVec}; +use parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; use sp_runtime::{DispatchError, ModuleError}; #[test] @@ -183,11 +182,9 @@ fn swap_locally_on_chain_using_foreign_assets() { .encode() .into(); - let buy_execution_fee_amount = - asset_hub_kusama_runtime::constants::fee::WeightToFee::weight_to_fee(&Weight::from_parts( - 10_100_000_000_000, - 300_000, - )); + let buy_execution_fee_amount = parachains_common::kusama::fee::WeightToFee::weight_to_fee( + &Weight::from_parts(10_100_000_000_000, 300_000), + ); let buy_execution_fee = MultiAsset { id: Concrete(MultiLocation { parents: 1, interior: Here }), fun: Fungible(buy_execution_fee_amount), diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/teleport.rs index b671fd5b4448..f69878f35435 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/teleport.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#![allow(dead_code)] // diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/Cargo.toml index 3d5d23e99636..ed383207228c 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/Cargo.toml @@ -3,6 +3,7 @@ name = "asset-hub-polkadot-integration-tests" version = "1.0.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" description = "Asset Hub Polkadot runtime integration tests with xcm-emulator" publish = false diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/lib.rs index 6cbf6ab1ccb0..e8ba8e44f25c 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub use codec::Encode; pub use frame_support::{ diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs index 337e4ba6113a..a1423f2ea90b 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/mod.rs index 547b59deadcb..c22de4f1c3eb 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. mod hrmp_channels; mod reserve_transfer; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs index e6722d585bc0..e53693d85d2b 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs index 143ab06b4e99..244b428a7523 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/send.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/set_xcm_versions.rs index 287bfa35ae9c..e121c4167993 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/set_xcm_versions.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/set_xcm_versions.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs index f0fbcf37fccf..644c51d75b66 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/teleport.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#![allow(dead_code)] // diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml index 34d34009ebd0..0c60a30a0b92 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml @@ -3,6 +3,7 @@ name = "asset-hub-westend-integration-tests" version = "1.0.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" description = "Asset Hub Westend runtime integration tests with xcm-emulator" publish = false diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs index 2c89c0f9dd46..6e0f3434aedf 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub use codec::Encode; pub use frame_support::{ diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs index e2a60e0b3004..b3841af0e6c3 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. mod reserve_transfer; mod send; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 67fc53a826aa..51fac43be125 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs index fcaffdabc4cf..424d222bef38 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs index 576325271745..2720095aac00 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs index 1c4dd9d76837..7d1615c9e291 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
-
-// You should have received a copy of the GNU General Public License
-// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 use crate::*;
 
@@ -171,11 +170,9 @@ fn swap_locally_on_chain_using_foreign_assets() {
 		.encode()
 		.into();
 
-	let buy_execution_fee_amount =
-		asset_hub_westend_runtime::constants::fee::WeightToFee::weight_to_fee(&Weight::from_parts(
-			10_100_000_000_000,
-			300_000,
-		));
+	let buy_execution_fee_amount = parachains_common::westend::fee::WeightToFee::weight_to_fee(
+		&Weight::from_parts(10_100_000_000_000, 300_000),
+	);
 	let buy_execution_fee = MultiAsset {
 		id: Concrete(MultiLocation { parents: 1, interior: Here }),
 		fun: Fungible(buy_execution_fee_amount),
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs
index 233dc32b13b5..8de73a7420c6 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs
@@ -1,18 +1,17 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Cumulus.
+// SPDX-License-Identifier: Apache-2.0
 
-// Cumulus is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Cumulus is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
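Note on the swap hunk above: the BuyExecution fee for the foreign-asset swap test is now derived from the shared `parachains_common::westend::fee::WeightToFee` rather than the runtime-local `constants::fee` module that this patch removes. A minimal sketch of the same computation, not part of the patch, assuming the usual emulated-test imports (`Weight` and the `WeightToFee` trait from `frame_support`, the XCM v3 prelude):

    // Sketch only: mirrors the updated swap test above; not part of this patch.
    use frame_support::weights::{Weight, WeightToFee as WeightToFeeT};
    use parachains_common::westend::fee::WeightToFee;
    use xcm::latest::prelude::*;

    fn buy_execution_fee() -> MultiAsset {
        // Convert the estimated execution weight into a fee amount using the
        // shared Westend fee curve (max of ref-time and proof-size fees).
        let fee_amount: u128 =
            WeightToFee::weight_to_fee(&Weight::from_parts(10_100_000_000_000, 300_000));
        // Pay that fee in the relay chain's native asset (parent location).
        MultiAsset {
            id: Concrete(MultiLocation { parents: 1, interior: Here }),
            fun: Fungible(fee_amount),
        }
    }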
#![allow(dead_code)] // diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml index 12a2ed51591f..ee6896855549 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml @@ -3,6 +3,7 @@ name = "bridge-hub-rococo-integration-tests" version = "1.0.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" description = "Bridge Hub Rococo runtime integration tests with xcm-emulator" publish = false diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs index 0a923ec04de7..122d65461159 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub use bp_messages::LaneId; pub use frame_support::assert_ok; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs index 777acd2aa972..f24e13bb71b8 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::*; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs index 6e11743aecbf..48347557ae77 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -1,17 +1,16 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
mod example; diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml index ee0e254befc5..5a28b1274151 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/Cargo.toml @@ -3,6 +3,7 @@ name = "collectives-polkadot-integration-tests" version = "0.1.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" description = "Polkadot Collectives parachain runtime integration tests based on xcm-emulator" publish = false diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/lib.rs index ad2b5a501117..aa716c7c9485 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub use codec::Encode; pub use frame_support::{assert_ok, sp_runtime::AccountId32}; diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs index 82e998f5a76c..c08a660205f6 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/fellowship.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Integration tests concerning the Fellowship. diff --git a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs index a9445ac8ec7e..fb3e235a25cd 100644 --- a/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/collectives/collectives-polkadot/src/tests/mod.rs @@ -1,17 +1,16 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
mod fellowship; diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index a8002158dbc1..ac1e650d5de7 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -3,6 +3,7 @@ name = "integration-tests-common" version = "1.0.0" authors.workspace = true edition.workspace = true +license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" publish = false diff --git a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs index fedc46fabfff..8725ebd140b9 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
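The constants.rs hunks just below switch every chain's `ED` from a runtime-local `constants::currency` module to the shared values in `parachains_common`. For orientation, a hedged sketch of the convention those shared values follow, mirroring the runtime constants modules deleted later in this patch (a parachain's existential deposit is one tenth of its relay chain's, and storage deposits are one hundredth):

    // Sketch only: illustrates the derivation used by the relocated currency
    // constants; the authoritative definitions live in `parachains_common`.
    use polkadot_core_primitives::Balance;
    use polkadot_runtime_constants as relay;

    /// The existential deposit, set to 1/10 of the parent Relay Chain's.
    pub const EXISTENTIAL_DEPOSIT: Balance = relay::currency::EXISTENTIAL_DEPOSIT / 10;

    /// Storage deposits map to 1/100 of what the Polkadot relay chain charges.
    pub const fn deposit(items: u32, bytes: u32) -> Balance {
        relay::currency::deposit(items, bytes) / 100
    }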
// Substrate use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; @@ -632,7 +631,7 @@ pub mod rococo { pub mod asset_hub_polkadot { use super::*; pub const PARA_ID: u32 = 1000; - pub const ED: Balance = asset_hub_polkadot_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = asset_hub_polkadot_runtime::RuntimeGenesisConfig { @@ -689,7 +688,7 @@ pub mod asset_hub_polkadot { pub mod asset_hub_westend { use super::*; pub const PARA_ID: u32 = 1000; - pub const ED: Balance = asset_hub_westend_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = asset_hub_westend_runtime::RuntimeGenesisConfig { @@ -746,7 +745,7 @@ pub mod asset_hub_westend { pub mod asset_hub_kusama { use super::*; pub const PARA_ID: u32 = 1000; - pub const ED: Balance = asset_hub_kusama_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = asset_hub_kusama_runtime::RuntimeGenesisConfig { @@ -864,7 +863,7 @@ pub mod penpal { pub mod collectives { use super::*; pub const PARA_ID: u32 = 1001; - pub const ED: Balance = collectives_polkadot_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = collectives_polkadot_runtime::RuntimeGenesisConfig { @@ -921,7 +920,7 @@ pub mod collectives { pub mod bridge_hub_kusama { use super::*; pub const PARA_ID: u32 = 1002; - pub const ED: Balance = bridge_hub_kusama_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = bridge_hub_kusama_runtime::RuntimeGenesisConfig { @@ -978,7 +977,7 @@ pub mod bridge_hub_kusama { pub mod bridge_hub_polkadot { use super::*; pub const PARA_ID: u32 = 1002; - pub const ED: Balance = bridge_hub_polkadot_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = bridge_hub_polkadot_runtime::RuntimeGenesisConfig { @@ -1035,7 +1034,7 @@ pub mod bridge_hub_polkadot { pub mod bridge_hub_rococo { use super::*; pub const PARA_ID: u32 = 1013; - pub const ED: Balance = bridge_hub_rococo_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { let genesis_config = bridge_hub_rococo_runtime::RuntimeGenesisConfig { diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index f13da0016205..eed61d941711 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub use codec::{Decode, Encode}; pub use paste; diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 49751136f7ba..7461165f2a19 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -1,18 +1,17 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. pub mod constants; pub mod impls; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/constants.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/constants.rs deleted file mode 100644 index 8daf8fda4b4a..000000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/constants.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod currency { - use kusama_runtime_constants as constants; - use polkadot_core_primitives::Balance; - - /// The existential deposit. Set to 1/10 of its parent Relay Chain. - pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; - - pub const UNITS: Balance = constants::currency::UNITS; - pub const CENTS: Balance = constants::currency::CENTS; - pub const GRAND: Balance = constants::currency::GRAND; - pub const MILLICENTS: Balance = constants::currency::MILLICENTS; - - pub const fn deposit(items: u32, bytes: u32) -> Balance { - // map to 1/100 of what the kusama relay chain charges (v9020) - constants::currency::deposit(items, bytes) / 100 - } -} - -/// Fee-related. -pub mod fee { - use frame_support::weights::{ - constants::ExtrinsicBaseWeight, FeePolynomial, Weight, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }; - use polkadot_core_primitives::Balance; - use smallvec::smallvec; - pub use sp_runtime::Perbill; - - /// The block saturation level. Fees will be updates based on this value. - pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); - - /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the - /// node's balance type. - /// - /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] - /// - [Balance::min, Balance::max] - /// - /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: - /// - Setting it to `0` will essentially disable the weight fee. - /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. - pub struct WeightToFee; - impl frame_support::weights::WeightToFee for WeightToFee { - type Balance = Balance; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); - let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); - - // Take the maximum instead of the sum to charge by the more scarce resource. - time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) - } - } - - /// Maps the reference time component of `Weight` to a fee. - pub struct RefTimeToFee; - impl WeightToFeePolynomial for RefTimeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // in Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Asset Hub, we map to 1/10 of that, or 1/100 CENT - let p = super::currency::CENTS; - let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } - - /// Maps the proof size component of `Weight` to a fee. - pub struct ProofSizeToFee; - impl WeightToFeePolynomial for ProofSizeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // Map 10kb proof to 1 CENT. - let p = super::currency::CENTS; - let q = 10_000; - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } -} - -/// Consensus-related. -pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. 
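The fee module removed above is relocated rather than dropped: the runtime now pulls `kusama::{consensus::*, currency::*, fee::WeightToFee}` from `parachains_common` (see the lib.rs hunk below). Its key behaviour is charging for the scarcer weight component, evaluating both polynomials and taking the maximum. A small sketch of that invariant, assuming the relocated module keeps the same public `RefTimeToFee`/`ProofSizeToFee` helpers as the deleted code:

    // Sketch only: checks the "charge by the scarcer resource" rule from the
    // fee code above against the relocated parachains_common types.
    // `RefTimeToFee`/`ProofSizeToFee` being public there is an assumption.
    use frame_support::weights::{
        FeePolynomial, Weight, WeightToFee as WeightToFeeT, WeightToFeePolynomial,
    };
    use parachains_common::{kusama::fee, Balance};

    fn fee_charges_scarcer_resource(weight: Weight) -> bool {
        let time_poly: FeePolynomial<Balance> = fee::RefTimeToFee::polynomial().into();
        let proof_poly: FeePolynomial<Balance> = fee::ProofSizeToFee::polynomial().into();
        let expected =
            time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size()));
        fee::WeightToFee::weight_to_fee(&weight) == expected
    }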
- pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; - /// Relay chain slot duration, in milliseconds. - pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index afccc5c068b5..828d1b4750a3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -24,7 +24,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; mod weights; pub mod xcm_config; @@ -50,7 +49,6 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -70,8 +68,10 @@ use pallet_asset_conversion_tx_payment::AssetConversionAdapter; use pallet_nfts::PalletFeatures; pub use parachains_common as common; use parachains_common::{ - impls::DealWithFees, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, - Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, + impls::DealWithFees, + kusama::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, + Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; use sp_runtime::RuntimeDebug; @@ -1395,8 +1395,9 @@ fn ensure_key_ss58() { #[cfg(test)] mod tests { - use super::{constants::fee, *}; + use super::*; use crate::{CENTS, MILLICENTS}; + use parachains_common::kusama::fee; use sp_runtime::traits::Zero; use sp_weights::WeightToFee; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs index 6d9eccdf3744..7d49b56e461a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs @@ -21,7 +21,6 @@ use asset_hub_kusama_runtime::xcm_config::{ AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, TrustBackedAssetsPalletLocation, }; pub use asset_hub_kusama_runtime::{ - constants::fee::WeightToFee, xcm_config::{CheckingAccount, ForeignCreatorsSovereignAccountOf, XcmConfig}, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, @@ -35,7 +34,9 @@ use frame_support::{ traits::fungibles::InspectEnumerable, weights::{Weight, WeightToFee as WeightToFeeT}, }; -use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; +use parachains_common::{ + kusama::fee::WeightToFee, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, +}; use sp_runtime::traits::MaybeEquivalence; use xcm::latest::prelude::*; use xcm_executor::traits::{Identity, JustTry, WeightTrader}; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/constants.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/constants.rs deleted file mode 100644 index d430e38f1af1..000000000000 --- 
a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/constants.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod currency { - use polkadot_core_primitives::Balance; - use polkadot_runtime_constants as constants; - - /// The existential deposit. Set to 1/10 of its parent Relay Chain. - pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; - - pub const UNITS: Balance = constants::currency::UNITS; - pub const DOLLARS: Balance = constants::currency::DOLLARS; - pub const CENTS: Balance = constants::currency::CENTS; - pub const MILLICENTS: Balance = constants::currency::MILLICENTS; - - pub const fn deposit(items: u32, bytes: u32) -> Balance { - // 1/100 of Polkadot - constants::currency::deposit(items, bytes) / 100 - } -} - -/// Fee-related. -pub mod fee { - use frame_support::weights::{ - constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }; - use polkadot_core_primitives::Balance; - use smallvec::smallvec; - pub use sp_runtime::Perbill; - use sp_weights::Weight; - - /// The block saturation level. Fees will be updates based on this value. - pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); - - /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the - /// node's balance type. - /// - /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] - /// - [Balance::min, Balance::max] - /// - /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: - /// - Setting it to `0` will essentially disable the weight fee. - /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. - pub struct WeightToFee; - impl frame_support::weights::WeightToFee for WeightToFee { - type Balance = Balance; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); - let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); - - // Take the maximum instead of the sum to charge by the more scarce resource. - time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) - } - } - - /// Maps the reference time component of `Weight` to a fee. 
- pub struct RefTimeToFee; - impl WeightToFeePolynomial for RefTimeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // in Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Asset Hub, we map to 1/10 of that, or 1/100 CENT - let p = super::currency::CENTS; - let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } - - /// Maps the proof size component of `Weight` to a fee. - pub struct ProofSizeToFee; - impl WeightToFeePolynomial for ProofSizeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // Map 10kb proof to 1 CENT. - let p = super::currency::CENTS; - let q = 10_000; - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } -} - -/// Consensus-related. -pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; - /// Relay chain slot duration, in milliseconds. - pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 7275209802f1..0051af21f9a3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -59,7 +59,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; mod weights; pub mod xcm_config; @@ -82,7 +81,6 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -102,6 +100,7 @@ use pallet_nfts::PalletFeatures; pub use parachains_common as common; use parachains_common::{ impls::{AssetsToBlockAuthor, DealWithFees}, + polkadot::{consensus::*, currency::*, fee::WeightToFee}, AccountId, AssetHubPolkadotAuraId as AuraId, AssetIdForTrustBackedAssets, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, @@ -1228,8 +1227,9 @@ cumulus_pallet_parachain_system::register_validate_block! 
{ #[cfg(test)] mod tests { - use super::{constants::fee, *}; + use super::*; use crate::{CENTS, MILLICENTS}; + use parachains_common::polkadot::fee; use sp_runtime::traits::Zero; use sp_weights::WeightToFee; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs index 3eab6723ec20..7200ebc16a28 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs @@ -22,10 +22,9 @@ use asset_hub_polkadot_runtime::xcm_config::{ ForeignCreatorsSovereignAccountOf, TrustBackedAssetsPalletLocation, XcmConfig, }; pub use asset_hub_polkadot_runtime::{ - constants::fee::WeightToFee, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, - ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, - MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - System, TrustBackedAssetsInstance, + AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, + ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -36,7 +35,8 @@ use frame_support::{ weights::{Weight, WeightToFee as WeightToFeeT}, }; use parachains_common::{ - AccountId, AssetHubPolkadotAuraId as AuraId, AssetIdForTrustBackedAssets, Balance, + polkadot::fee::WeightToFee, AccountId, AssetHubPolkadotAuraId as AuraId, + AssetIdForTrustBackedAssets, Balance, }; use sp_runtime::traits::MaybeEquivalence; use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index b5f33e4e0145..4887fce1b0a4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -24,7 +24,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; mod weights; pub mod xcm_config; @@ -36,7 +35,6 @@ use assets_common::{ AssetIdForTrustBackedAssetsConvert, }; use codec::{Decode, Encode, MaxEncodedLen}; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use frame_support::{ construct_runtime, @@ -57,8 +55,10 @@ use pallet_asset_conversion_tx_payment::AssetConversionAdapter; use pallet_nfts::PalletFeatures; pub use parachains_common as common; use parachains_common::{ - impls::DealWithFees, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, - Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, + impls::DealWithFees, + westend::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, + Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; use sp_api::impl_runtime_apis; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index b2bb511182eb..599ff90e254a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -17,13 +17,6 @@ //! Tests for the Westmint (Westend Assets Hub) chain. -pub use asset_hub_westend_runtime::{ - constants::fee::WeightToFee, - xcm_config::{CheckingAccount, TrustBackedAssetsPalletLocation, XcmConfig}, - AllowMultiAssetPools, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - ForeignAssetsInstance, ParachainSystem, Runtime, SessionKeys, System, - TrustBackedAssetsInstance, -}; use asset_hub_westend_runtime::{ xcm_config::{ AssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, @@ -32,6 +25,12 @@ use asset_hub_westend_runtime::{ AllPalletsWithoutSystem, MetadataDepositBase, MetadataDepositPerByte, RuntimeCall, RuntimeEvent, }; +pub use asset_hub_westend_runtime::{ + xcm_config::{CheckingAccount, TrustBackedAssetsPalletLocation, XcmConfig}, + AllowMultiAssetPools, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, + ForeignAssetsInstance, ParachainSystem, Runtime, SessionKeys, System, + TrustBackedAssetsInstance, +}; use asset_test_utils::{CollatorSessionKeys, ExtBuilder, XcmReceivedFrom}; use codec::{Decode, DecodeLimit, Encode}; use cumulus_primitives_utility::ChargeWeightInFungibles; @@ -40,7 +39,9 @@ use frame_support::{ traits::fungibles::InspectEnumerable, weights::{Weight, WeightToFee as WeightToFeeT}, }; -use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; +use parachains_common::{ + westend::fee::WeightToFee, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, +}; use sp_io; use sp_runtime::traits::MaybeEquivalence; use std::convert::Into; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/constants.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/constants.rs deleted file mode 100644 index 760bf7fb6d1f..000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/constants.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod currency { - use kusama_runtime_constants as constants; - use polkadot_core_primitives::Balance; - - /// The existential deposit. Set to 1/10 of its parent Relay Chain. - pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; - - pub const UNITS: Balance = constants::currency::UNITS; - pub const CENTS: Balance = constants::currency::CENTS; - pub const MILLICENTS: Balance = constants::currency::MILLICENTS; - - pub const fn deposit(items: u32, bytes: u32) -> Balance { - // map to 1/100 of what the kusama relay chain charges (v9020) - constants::currency::deposit(items, bytes) / 100 - } -} - -/// Fee-related. 
-pub mod fee { - use frame_support::{ - pallet_prelude::Weight, - weights::{ - constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }, - }; - use polkadot_core_primitives::Balance; - use smallvec::smallvec; - pub use sp_runtime::Perbill; - - /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the - /// node's balance type. - /// - /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] - /// - [Balance::min, Balance::max] - /// - /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: - /// - Setting it to `0` will essentially disable the weight fee. - /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. - pub struct WeightToFee; - impl frame_support::weights::WeightToFee for WeightToFee { - type Balance = Balance; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); - let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); - - // Take the maximum instead of the sum to charge by the more scarce resource. - time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) - } - } - - /// Maps the reference time component of `Weight` to a fee. - pub struct RefTimeToFee; - impl WeightToFeePolynomial for RefTimeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // in Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Bridge Hub, we map to 1/10 of that, or 1/100 CENT - let p = super::currency::CENTS; - let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } - - /// Maps the proof size component of `Weight` to a fee. - pub struct ProofSizeToFee; - impl WeightToFeePolynomial for ProofSizeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // Map 10kb proof to 1 CENT. - let p = super::currency::CENTS; - let q = 10_000; - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } -} - -/// Consensus-related. -pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; - /// Relay chain slot duration, in milliseconds. 
- pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index 044ff845fe66..54b15e6b327b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -22,7 +22,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; mod weights; pub mod xcm_config; @@ -41,7 +40,6 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -69,8 +67,10 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use parachains_common::{ - impls::DealWithFees, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, - AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, + impls::DealWithFees, + kusama::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, + HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; // XCM Imports diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs index 5418e36bd120..893524e12f66 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs @@ -15,12 +15,12 @@ // along with Cumulus. If not, see . pub use bridge_hub_kusama_runtime::{ - constants::fee::WeightToFee, xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, - ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, + xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, ExistentialDeposit, ParachainSystem, + PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, }; use codec::Decode; use frame_support::parameter_types; -use parachains_common::{AccountId, AuraId}; +use parachains_common::{kusama::fee::WeightToFee, AccountId, AuraId}; const ALICE: [u8; 32] = [1u8; 32]; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/constants.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/constants.rs deleted file mode 100644 index 3bab7bd1eb33..000000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/constants.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod currency { - use polkadot_core_primitives::Balance; - use polkadot_runtime_constants as constants; - - /// The existential deposit. 
Set to 1/10 of its parent Relay Chain. - pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; - - pub const UNITS: Balance = constants::currency::UNITS; - pub const CENTS: Balance = constants::currency::CENTS; - pub const MILLICENTS: Balance = constants::currency::MILLICENTS; - - pub const fn deposit(items: u32, bytes: u32) -> Balance { - // 1/100 of Polkadot - constants::currency::deposit(items, bytes) / 100 - } -} - -/// Fee-related. -pub mod fee { - use frame_support::{ - pallet_prelude::Weight, - weights::{ - constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }, - }; - use polkadot_core_primitives::Balance; - use smallvec::smallvec; - pub use sp_runtime::Perbill; - - /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the - /// node's balance type. - /// - /// This should typically create a mapping between the following ranges: - /// - [0, MAXIMUM_BLOCK_WEIGHT] - /// - [Balance::min, Balance::max] - /// - /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: - /// - Setting it to `0` will essentially disable the weight fee. - /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. - pub struct WeightToFee; - impl frame_support::weights::WeightToFee for WeightToFee { - type Balance = Balance; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - let time_poly: FeePolynomial = RefTimeToFee::polynomial().into(); - let proof_poly: FeePolynomial = ProofSizeToFee::polynomial().into(); - - // Take the maximum instead of the sum to charge by the more scarce resource. - time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) - } - } - - /// Maps the reference time component of `Weight` to a fee. - pub struct RefTimeToFee; - impl WeightToFeePolynomial for RefTimeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // in Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - // in Bridge Hub, we map to 1/10 of that, or 1/100 CENT - let p = super::currency::CENTS; - let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } - - /// Maps the proof size component of `Weight` to a fee. - pub struct ProofSizeToFee; - impl WeightToFeePolynomial for ProofSizeToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - // Map 10kb proof to 1 CENT. - let p = super::currency::CENTS; - let q = 10_000; - - smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } - } -} - -/// Consensus-related. -pub mod consensus { - /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included - /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; - /// How many parachain blocks are processed by the relay chain per parent. Limits the - /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; - /// Relay chain slot duration, in milliseconds. 
- pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index f735858a9340..dbfdc249a3cd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -22,7 +22,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; mod weights; pub mod xcm_config; @@ -41,7 +40,6 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -70,8 +68,10 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use parachains_common::{ - impls::DealWithFees, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, - AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, + impls::DealWithFees, + polkadot::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, + HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; // XCM Imports use xcm::latest::prelude::BodyId; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs index 03b23cdd7ac2..0be87bd46fac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs @@ -15,12 +15,12 @@ // along with Cumulus. If not, see . 
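The three deleted `constants.rs` modules above were near-identical copies; after this patch the relay-specific currency, fee, and consensus constants live once in `parachains_common` (under its `kusama`, `polkadot`, `rococo`, and `westend` submodules) and the runtimes import them from there, as the `lib.rs` hunks show. A minimal sketch of a downstream consumer under that assumption (illustrative only, not part of the patch; `fee_for` is a hypothetical helper):

use frame_support::weights::{Weight, WeightToFee as WeightToFeeT};
// Shared constants are now pulled from `parachains_common` instead of a
// runtime-local `constants` module.
use parachains_common::polkadot::fee::WeightToFee;

/// Hypothetical helper: computes the fee for a given weight the same way the
/// bridge-hub and asset-hub runtimes do after this change.
fn fee_for(weight: Weight) -> u128 {
    // The shared `WeightToFee` evaluates the ref-time and proof-size polynomials
    // separately and charges the maximum, i.e. whichever resource is scarcer.
    <WeightToFee as WeightToFeeT>::weight_to_fee(&weight)
}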
pub use bridge_hub_polkadot_runtime::{ - constants::fee::WeightToFee, xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, - ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, + xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, ExistentialDeposit, ParachainSystem, + PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, }; use codec::Decode; use frame_support::parameter_types; -use parachains_common::{AccountId, AuraId}; +use parachains_common::{polkadot::fee::WeightToFee, AccountId, AuraId}; const ALICE: [u8; 32] = [1u8; 32]; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index a872e2327303..4311a6a629f9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -24,11 +24,9 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod bridge_hub_rococo_config; pub mod bridge_hub_wococo_config; -pub mod constants; mod weights; pub mod xcm_config; -use constants::{consensus::*, currency::*}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -79,7 +77,6 @@ use crate::{ BridgeRefundBridgeHubRococoMessages, OnBridgeHubWococoBlobDispatcher, WithBridgeHubRococoMessageBridge, }, - constants::fee::WeightToFee, xcm_config::XcmRouter, }; use bridge_runtime_common::{ @@ -87,8 +84,10 @@ use bridge_runtime_common::{ messages_xcm_extension::{XcmAsPlainPayload, XcmBlobMessageDispatch}, }; use parachains_common::{ - impls::DealWithFees, AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, - AVERAGE_ON_INITIALIZE_RATIO, HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, + impls::DealWithFees, + rococo::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, + HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; use xcm_executor::XcmExecutor; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 77e7e0382d31..e5fe67f2a8e5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -19,7 +19,6 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ bridge_hub_rococo_config, bridge_hub_wococo_config, - constants::fee::WeightToFee, xcm_config::{RelayNetwork, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, DeliveryRewardInBalance, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, RequiredStakeForStakeAndSlash, @@ -28,7 +27,7 @@ use bridge_hub_rococo_runtime::{ use codec::{Decode, Encode}; use frame_support::parameter_types; use frame_system::pallet_prelude::HeaderFor; -use parachains_common::{AccountId, AuraId, Balance}; +use parachains_common::{rococo::fee::WeightToFee, AccountId, AuraId, Balance}; use sp_keyring::AccountKeyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs index 9b8675331295..b97e44dda1be 100644 --- 
a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs @@ -20,7 +20,7 @@ pub(crate) mod migration; mod origins; mod tracks; use crate::{ - constants, impls::ToParentTreasury, weights, AccountId, Balance, Balances, FellowshipReferenda, + impls::ToParentTreasury, weights, AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, PolkadotTreasuryAccount, Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, DAYS, }; @@ -36,6 +36,7 @@ pub use origins::{ }; use pallet_ranked_collective::EnsureOfRank; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::polkadot::account; use polkadot_runtime_constants::{time::HOURS, xcm::body::FELLOWSHIP_ADMIN_INDEX}; use sp_core::{ConstU128, ConstU32}; use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst}; @@ -62,7 +63,7 @@ pub mod ranks { parameter_types! { // Referenda pallet account, used to temporarily deposit slashed imbalance before teleporting. - pub ReferendaPalletAccount: AccountId = constants::account::REFERENDA_PALLET_ID.into_account_truncating(); + pub ReferendaPalletAccount: AccountId = account::REFERENDA_PALLET_ID.into_account_truncating(); pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); } diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index 5033a2d8beb1..238db08a0c9e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -36,7 +36,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; pub mod impls; mod weights; pub mod xcm_config; @@ -64,7 +63,6 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -79,7 +77,9 @@ use frame_system::{ }; pub use parachains_common as common; use parachains_common::{ - impls::DealWithFees, AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, + impls::DealWithFees, + polkadot::{account::*, consensus::*, currency::*, fee::WeightToFee}, + AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; @@ -484,8 +484,8 @@ parameter_types! { pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 DOT bond to join as an Ally // The Alliance pallet account, used as a temporary place to deposit a slashed imbalance // before the teleport to the Treasury. - pub AlliancePalletAccount: AccountId = constants::account::ALLIANCE_PALLET_ID.into_account_truncating(); - pub PolkadotTreasuryAccount: AccountId = constants::account::POLKADOT_TREASURY_PALLET_ID.into_account_truncating(); + pub AlliancePalletAccount: AccountId = ALLIANCE_PALLET_ID.into_account_truncating(); + pub PolkadotTreasuryAccount: AccountId = POLKADOT_TREASURY_PALLET_ID.into_account_truncating(); // The number of blocks a member must wait between giving a retirement notice and retiring. // Supposed to be greater than time required to `kick_member` with alliance motion. 
pub const AllianceRetirementPeriod: BlockNumber = (90 * DAYS) + ALLIANCE_MOTION_DURATION; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 6598fd3fae0a..1c99393d5e52 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -14,8 +14,8 @@ // limitations under the License. use crate::{ - constants::currency::deposit, Balance, Balances, RandomnessCollectiveFlip, Runtime, - RuntimeCall, RuntimeEvent, RuntimeHoldReason, Timestamp, + Balance, Balances, RandomnessCollectiveFlip, Runtime, RuntimeCall, RuntimeEvent, + RuntimeHoldReason, Timestamp, }; use frame_support::{ parameter_types, @@ -28,7 +28,7 @@ use pallet_contracts::{ }; use sp_runtime::Perbill; -pub use parachains_common::AVERAGE_ON_INITIALIZE_RATIO; +pub use parachains_common::{rococo::currency::deposit, AVERAGE_ON_INITIALIZE_RATIO}; // Prints debug output of the `contracts` pallet to stdout if the node is // started with `-lruntime::contracts=debug`. diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index b5815ab057e6..399ada1be2c7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -25,7 +25,6 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod constants; mod contracts; mod weights; mod xcm_config; @@ -45,7 +44,6 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use constants::{consensus::*, currency::*, fee::WeightToFee}; use frame_support::{ construct_runtime, dispatch::DispatchClass, @@ -57,9 +55,10 @@ use frame_support::{ use frame_system::limits::{BlockLength, BlockWeights}; pub use parachains_common as common; use parachains_common::{ - impls::DealWithFees, AccountId, BlockNumber, Hash, Header, Nonce, Signature, - AVERAGE_ON_INITIALIZE_RATIO, MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, - SLOT_DURATION, + impls::DealWithFees, + rococo::{consensus::*, currency::*, fee::WeightToFee}, + AccountId, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, + MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; pub use parachains_common::{AuraId, Balance}; use xcm_config::CollatorSelectionUpdateOrigin; diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index c1fb60374aef..c1edeb98cd0a 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -32,11 +32,11 @@ pub type AssetHubWestendChainSpec = sc_service::GenericChainSpec; const ASSET_HUB_POLKADOT_ED: AssetHubBalance = - asset_hub_polkadot_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_KUSAMA_ED: AssetHubBalance = - asset_hub_kusama_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_WESTEND_ED: AssetHubBalance = - asset_hub_westend_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. 
/// diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 4cb81f57081a..5de4a49f8275 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -192,7 +192,7 @@ pub mod rococo { pub(crate) const BRIDGE_HUB_ROCOCO_LOCAL: &str = "bridge-hub-rococo-local"; pub(crate) const BRIDGE_HUB_ROCOCO_DEVELOPMENT: &str = "bridge-hub-rococo-dev"; const BRIDGE_HUB_ROCOCO_ED: BridgeHubBalance = - bridge_hub_rococo_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = @@ -372,7 +372,7 @@ pub mod kusama { pub(crate) const BRIDGE_HUB_KUSAMA_LOCAL: &str = "bridge-hub-kusama-local"; pub(crate) const BRIDGE_HUB_KUSAMA_DEVELOPMENT: &str = "bridge-hub-kusama-dev"; const BRIDGE_HUB_KUSAMA_ED: BridgeHubBalance = - bridge_hub_kusama_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = @@ -509,7 +509,7 @@ pub mod polkadot { pub(crate) const BRIDGE_HUB_POLKADOT_LOCAL: &str = "bridge-hub-polkadot-local"; pub(crate) const BRIDGE_HUB_POLKADOT_DEVELOPMENT: &str = "bridge-hub-polkadot-dev"; const BRIDGE_HUB_POLKADOT_ED: BridgeHubBalance = - bridge_hub_polkadot_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index 6126fbb114f0..fbf49b9535a4 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -26,7 +26,7 @@ pub type CollectivesPolkadotChainSpec = sc_service::GenericChainSpec; const COLLECTIVES_POLKADOT_ED: CollectivesBalance = - collectives_polkadot_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. /// diff --git a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs index bf318e448f0e..0d5012858edc 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs @@ -31,7 +31,7 @@ const CONTRACTS_PARACHAIN_ID: u32 = 1002; /// The existential deposit is determined by the runtime "contracts-rococo". const CONTRACTS_ROCOCO_ED: contracts_rococo_runtime::Balance = - contracts_rococo_runtime::constants::currency::EXISTENTIAL_DEPOSIT; + parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; pub fn contracts_rococo_development_config() -> ContractsRococoChainSpec { let mut properties = sc_chain_spec::Properties::new(); From 711132c961b1f62b4f1bf23aa015728bab41a5d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 09:27:19 +0200 Subject: [PATCH 10/28] Bump thiserror from 1.0.47 to 1.0.48 (#1396) Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.47 to 1.0.48. 
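This is a patch-level bump, so no call sites change; the crates listed below only need their pinned version strings updated. For context, a minimal sketch of the derive these crates use thiserror for (illustrative, not taken from any of the diffs):

use thiserror::Error;

#[derive(Debug, Error)]
enum ExampleError {
    // `#[error(...)]` supplies the `Display` implementation.
    #[error("block #{0} not found")]
    BlockNotFound(u32),
    // `#[from]` generates a `From<std::io::Error>` impl and wires up `source()`.
    #[error("i/o error: {0}")]
    Io(#[from] std::io::Error),
}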
- [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.47...1.0.48) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Liam Aharon --- Cargo.lock | 8 ++++---- cumulus/client/consensus/proposer/Cargo.toml | 2 +- cumulus/client/relay-chain-interface/Cargo.toml | 2 +- cumulus/client/relay-chain-rpc-interface/Cargo.toml | 2 +- polkadot/cli/Cargo.toml | 2 +- polkadot/erasure-coding/Cargo.toml | 2 +- polkadot/node/collation-generation/Cargo.toml | 2 +- polkadot/node/core/approval-voting/Cargo.toml | 2 +- polkadot/node/core/av-store/Cargo.toml | 2 +- polkadot/node/core/backing/Cargo.toml | 2 +- polkadot/node/core/bitfield-signing/Cargo.toml | 2 +- polkadot/node/core/chain-selection/Cargo.toml | 2 +- polkadot/node/core/dispute-coordinator/Cargo.toml | 2 +- polkadot/node/core/parachains-inherent/Cargo.toml | 2 +- polkadot/node/core/prospective-parachains/Cargo.toml | 2 +- polkadot/node/core/provisioner/Cargo.toml | 2 +- polkadot/node/core/pvf-checker/Cargo.toml | 2 +- polkadot/node/jaeger/Cargo.toml | 2 +- .../node/network/availability-distribution/Cargo.toml | 2 +- polkadot/node/network/availability-recovery/Cargo.toml | 2 +- polkadot/node/network/collator-protocol/Cargo.toml | 2 +- polkadot/node/network/dispute-distribution/Cargo.toml | 2 +- polkadot/node/network/protocol/Cargo.toml | 2 +- polkadot/node/network/statement-distribution/Cargo.toml | 2 +- polkadot/node/primitives/Cargo.toml | 2 +- polkadot/node/service/Cargo.toml | 2 +- polkadot/node/subsystem-types/Cargo.toml | 2 +- polkadot/node/subsystem-util/Cargo.toml | 2 +- polkadot/node/test/performance-test/Cargo.toml | 2 +- polkadot/node/zombienet-backchannel/Cargo.toml | 2 +- polkadot/utils/staking-miner/Cargo.toml | 2 +- substrate/client/allocator/Cargo.toml | 2 +- substrate/client/api/Cargo.toml | 2 +- substrate/client/cli/Cargo.toml | 2 +- substrate/client/consensus/common/Cargo.toml | 2 +- substrate/client/executor/common/Cargo.toml | 2 +- substrate/client/service/Cargo.toml | 2 +- substrate/client/storage-monitor/Cargo.toml | 2 +- substrate/client/sync-state-rpc/Cargo.toml | 2 +- substrate/client/telemetry/Cargo.toml | 2 +- substrate/client/tracing/Cargo.toml | 2 +- substrate/client/transaction-pool/Cargo.toml | 2 +- substrate/client/transaction-pool/api/Cargo.toml | 2 +- substrate/primitives/api/Cargo.toml | 2 +- substrate/primitives/blockchain/Cargo.toml | 2 +- substrate/primitives/consensus/common/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/inherents/Cargo.toml | 2 +- substrate/primitives/state-machine/Cargo.toml | 2 +- substrate/primitives/timestamp/Cargo.toml | 2 +- substrate/primitives/trie/Cargo.toml | 2 +- substrate/primitives/version/Cargo.toml | 2 +- substrate/utils/frame/benchmarking-cli/Cargo.toml | 2 +- 53 files changed, 56 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38d0a3388d33..c254302517f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18661,9 +18661,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = 
"9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] @@ -18690,9 +18690,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index f7edbc695e38..29720a8f4791 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -8,7 +8,7 @@ edition.workspace = true [dependencies] anyhow = "1.0" async-trait = "0.1.73" -thiserror = "1.0.47" +thiserror = "1.0.48" # Substrate sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index b81cc1b47807..3da7ab0b0e82 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -16,6 +16,6 @@ sc-client-api = { path = "../../../substrate/client/api" } futures = "0.3.28" async-trait = "0.1.73" -thiserror = "1.0.47" +thiserror = "1.0.48" jsonrpsee-core = "0.16.2" parity-scale-codec = "3.6.4" diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 9797c512505c..305ab82b064c 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -38,6 +38,6 @@ schnellru = "0.2.1" smoldot = { version = "0.11.0", default_features = false, features = ["std"]} smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } either = "1.8.1" -thiserror = "1.0.38" +thiserror = "1.0.48" rand = "0.8.5" pin-project = "1.1.3" diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index bcb4f2ac1308..c8c3f91f9835 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -17,7 +17,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] clap = { version = "4.4.2", features = ["derive"], optional = true } log = "0.4.17" -thiserror = "1.0.31" +thiserror = "1.0.48" futures = "0.3.21" pyro = { package = "pyroscope", version = "0.5.3", optional = true } pyroscope_pprofrs = { version = "0.2", optional = true } diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index f74a88038825..d07b77ec4ddf 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -12,7 +12,7 @@ novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["std", "derive"] } sp-core = { path = "../../substrate/primitives/core" } sp-trie = { path = "../../substrate/primitives/trie" } -thiserror = "1.0.31" +thiserror = "1.0.48" [dev-dependencies] criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index e2870dc2cc8a..b110540140f9 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -15,7 +15,7 @@ polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" 
} sp-core = { path = "../../../substrate/primitives/core" } sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -thiserror = "1.0.31" +thiserror = "1.0.48" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } [dev-dependencies] diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 307d9947a961..acad0d1fa4e4 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -16,7 +16,7 @@ merlin = "2.0" schnorrkel = "0.9.1" kvdb = "0.13.0" derive_more = "0.99.17" -thiserror = "1.0.31" +thiserror = "1.0.48" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index efbbb27754e2..955fe37d7c39 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true futures = "0.3.21" futures-timer = "3.0.2" kvdb = "0.13.0" -thiserror = "1.0.31" +thiserror = "1.0.48" gum = { package = "tracing-gum", path = "../../gum" } bitvec = "1.0.0" diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 0005f6f6a30d..e7e6358e8a46 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -16,7 +16,7 @@ erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } -thiserror = "1.0.31" +thiserror = "1.0.48" fatality = "0.0.6" [dev-dependencies] diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index c2df6cc709e6..de38d18d9706 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -13,7 +13,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } wasm-timer = "0.2.5" -thiserror = "1.0.31" +thiserror = "1.0.48" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 57ba7908315b..7678379870e0 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -15,7 +15,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } kvdb = "0.13.0" -thiserror = "1.0.31" +thiserror = "1.0.48" parity-scale-codec = "3.6.1" [dev-dependencies] diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index c49bd507127d..6061c52b7e81 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -10,7 +10,7 @@ futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.1" kvdb = "0.13.0" -thiserror = "1.0.31" +thiserror = "1.0.48" schnellru = "0.2.1" fatality = 
"0.0.6" diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 515d70dad827..18d91dcfb565 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } -thiserror = "1.0.31" +thiserror = "1.0.48" async-trait = "0.1.57" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 9fa17ec0c154..77a59d87f3fa 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true futures = "0.3.19" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.4" -thiserror = "1.0.30" +thiserror = "1.0.48" fatality = "0.0.6" bitvec = "1" diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index dc7914171668..05ea92caa976 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } -thiserror = "1.0.31" +thiserror = "1.0.48" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 783ac19009a3..0326a20e5a52 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -8,7 +8,7 @@ license.workspace = true [dependencies] futures = "0.3.21" -thiserror = "1.0.31" +thiserror = "1.0.48" gum = { package = "tracing-gum", path = "../../gum" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index 7b4b5e1c8bce..fcfbbaec611e 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -14,7 +14,7 @@ polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } sc-network = { path = "../../../substrate/client/network" } sp-core = { path = "../../../substrate/primitives/core" } -thiserror = "1.0.31" +thiserror = "1.0.48" tokio = "1.24.2" log = "0.4.17" parity-scale-codec = { version = "3.6.1", default-features = false } diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 581192e9560f..c3c7aa4e0ea5 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -17,7 +17,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-primitives = { path = "../../primitives" } sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } sp-keystore = { path = "../../../../substrate/primitives/keystore" } -thiserror = "1.0.31" +thiserror = "1.0.48" rand = "0.8.5" derive_more = "0.99.17" schnellru = "0.2.1" diff --git a/polkadot/node/network/availability-recovery/Cargo.toml 
b/polkadot/node/network/availability-recovery/Cargo.toml index bf95fb1e9f4c..07ff09c7e70e 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -10,7 +10,7 @@ futures = "0.3.21" schnellru = "0.2.1" rand = "0.8.5" fatality = "0.0.6" -thiserror = "1.0.31" +thiserror = "1.0.48" gum = { package = "tracing-gum", path = "../../gum" } polkadot-erasure-coding = { path = "../../../erasure-coding" } diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index ac6284b305bd..e5328cf16629 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -21,7 +21,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-subsystem = { path = "../../subsystem" } fatality = "0.0.6" -thiserror = "1.0.31" +thiserror = "1.0.48" tokio-util = "0.7.1" [dev-dependencies] diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index ece89f34c88d..5d8e245d289a 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -20,7 +20,7 @@ polkadot-node-primitives = { path = "../../primitives" } sc-network = { path = "../../../../substrate/client/network" } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } -thiserror = "1.0.31" +thiserror = "1.0.48" fatality = "0.0.6" schnellru = "0.2.1" indexmap = "1.9.1" diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 2a56f197b854..c33b9eae3252 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -18,7 +18,7 @@ sc-network = { path = "../../../../substrate/client/network" } sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } strum = { version = "0.24", features = ["derive"] } futures = "0.3.21" -thiserror = "1.0.31" +thiserror = "1.0.48" fatality = "0.0.6" rand = "0.8" derive_more = "0.99" diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 5ff1ba9de02b..bf516e7b7ba9 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -21,7 +21,7 @@ polkadot-node-network-protocol = { path = "../protocol" } arrayvec = "0.7.4" indexmap = "1.9.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -thiserror = "1.0.31" +thiserror = "1.0.48" fatality = "0.0.6" bitvec = "1" diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index ef03c02f7bc6..55dfa6738709 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -19,7 +19,7 @@ sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compres sp-runtime = { path = "../../../substrate/primitives/runtime" } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } schnorrkel = "0.9.1" -thiserror = "1.0.31" +thiserror = "1.0.48" serde = { version = "1.0.188", features = ["derive"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] diff --git a/polkadot/node/service/Cargo.toml 
b/polkadot/node/service/Cargo.toml index 90881aa051a0..03814af19cd2 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -79,7 +79,7 @@ hex-literal = "0.4.1" gum = { package = "tracing-gum", path = "../gum" } serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.96" -thiserror = "1.0.31" +thiserror = "1.0.48" kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.8", optional = true } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 7ca3a0faf31a..317b079a316c 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -22,5 +22,5 @@ sp-authority-discovery = { path = "../../../substrate/primitives/authority-disco sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } -thiserror = "1.0.31" +thiserror = "1.0.48" async-trait = "0.1.57" diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index d243a90a2bd6..204df0dad117 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -15,7 +15,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ parking_lot = "0.11.2" pin-project = "1.0.9" rand = "0.8.5" -thiserror = "1.0.31" +thiserror = "1.0.48" fatality = "0.0.6" gum = { package = "tracing-gum", path = "../gum" } derive_more = "0.99.17" diff --git a/polkadot/node/test/performance-test/Cargo.toml b/polkadot/node/test/performance-test/Cargo.toml index 98b67615a6f2..5747ac88b1e4 100644 --- a/polkadot/node/test/performance-test/Cargo.toml +++ b/polkadot/node/test/performance-test/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true license.workspace = true [dependencies] -thiserror = "1.0.31" +thiserror = "1.0.48" quote = "1.0.28" env_logger = "0.9" log = "0.4" diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index 3d59a4a3cddc..9bf56b550bbc 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -16,7 +16,7 @@ futures-util = "0.3.23" lazy_static = "1.4.0" parity-scale-codec = { version = "3.6.1", features = ["derive"] } reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } -thiserror = "1.0.31" +thiserror = "1.0.48" gum = { package = "tracing-gum", path = "../gum" } serde = { version = "1.0", features = ["derive"] } serde_json = "1" diff --git a/polkadot/utils/staking-miner/Cargo.toml b/polkadot/utils/staking-miner/Cargo.toml index 09e73bc10f2d..4b012e3ac73f 100644 --- a/polkadot/utils/staking-miner/Cargo.toml +++ b/polkadot/utils/staking-miner/Cargo.toml @@ -19,7 +19,7 @@ log = "0.4.17" paste = "1.0.7" serde = "1.0.188" serde_json = "1.0" -thiserror = "1.0.31" +thiserror = "1.0.48" tokio = { version = "1.24.2", features = ["macros", "rt-multi-thread", "sync"] } remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } signal-hook-tokio = { version = "0.3", features = ["futures-v0_3"] } diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index ffbfe14e86c2..31c714180ce5 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -15,6 +15,6 @@ targets = 
["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.17" -thiserror = "1.0.30" +thiserror = "1.0.48" sp-core = { path = "../../primitives/core" } sp-wasm-interface = { path = "../../primitives/wasm-interface" } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 43545095c0ea..b59149424ed3 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -37,6 +37,6 @@ sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } [dev-dependencies] -thiserror = "1.0.30" +thiserror = "1.0.48" sp-test-primitives = { path = "../../primitives/test-primitives" } substrate-test-runtime = { path = "../../test-utils/runtime" } diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 917cdc04d1d9..06967a890931 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -27,7 +27,7 @@ regex = "1.6.0" rpassword = "7.0.0" serde = "1.0.188" serde_json = "1.0.85" -thiserror = "1.0.30" +thiserror = "1.0.48" tiny-bip39 = "1.0.0" tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_lot"] } sc-client-api = { path = "../api" } diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index c9b3f221ecc0..f269e3752d43 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -21,7 +21,7 @@ log = "0.4.17" mockall = "0.11.3" parking_lot = "0.12.1" serde = { version = "1.0", features = ["derive"] } -thiserror = "1.0.30" +thiserror = "1.0.48" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } sc-utils = { path = "../../utils" } diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index e84b9f9c85b8..5118279b43b4 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -thiserror = "1.0.30" +thiserror = "1.0.48" wasm-instrument = "0.3" sc-allocator = { path = "../../allocator" } sp-maybe-compressed-blob = { path = "../../../primitives/maybe-compressed-blob" } diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index 6f794d93fed3..87b341fe3123 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -26,7 +26,7 @@ runtime-benchmarks = [ [dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } -thiserror = "1.0.30" +thiserror = "1.0.48" futures = "0.3.21" rand = "0.8.5" parking_lot = "0.12.1" diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index c5b71260f97e..5354819e3a38 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -15,4 +15,4 @@ fs4 = "0.6.3" sc-client-db = { path = "../db", default-features = false} sp-core = { path = "../../primitives/core" } tokio = "1.22.0" -thiserror = "1.0.30" +thiserror = "1.0.48" diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index 59cc6ba40481..88d268cc93d9 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = 
"0.16.2", features = ["client-core", "server", "macros"] } serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.85" -thiserror = "1.0.30" +thiserror = "1.0.48" sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } sc-consensus-babe = { path = "../consensus/babe" } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 153629129091..2be5ad5c1435 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -24,5 +24,5 @@ sc-utils = { path = "../utils" } rand = "0.8.5" serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.85" -thiserror = "1.0.30" +thiserror = "1.0.48" wasm-timer = "0.2.5" diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index c9cd6ca313cd..ffcbf0749083 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -23,7 +23,7 @@ parking_lot = "0.12.1" regex = "1.6.0" rustc-hash = "1.1.0" serde = "1.0.188" -thiserror = "1.0.30" +thiserror = "1.0.48" tracing = "0.1.29" tracing-log = "0.1.3" tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] } diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 0e502cb39fba..b893dc839edd 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -21,7 +21,7 @@ linked-hash-map = "0.5.4" log = "0.4.17" parking_lot = "0.12.1" serde = { version = "1.0.188", features = ["derive"] } -thiserror = "1.0.30" +thiserror = "1.0.48" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } sc-transaction-pool-api = { path = "api" } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index edab1304e01c..5ff5a4149ca9 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -14,7 +14,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = "0.4.17" serde = { version = "1.0.188", features = ["derive"] } -thiserror = "1.0.30" +thiserror = "1.0.48" sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core", default-features = false} sp-runtime = { path = "../../../primitives/runtime", default-features = false} diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index c5611b22017c..95b5dde37139 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -23,7 +23,7 @@ sp-version = { path = "../version", default-features = false} sp-state-machine = { path = "../state-machine", default-features = false, optional = true} sp-trie = { path = "../trie", default-features = false, optional = true} hash-db = { version = "0.16.0", optional = true } -thiserror = { version = "1.0.30", optional = true } +thiserror = { version = "1.0.48", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true} log = { version = "0.4.17", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 418f94589857..33db09ce0ac2 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ 
b/substrate/primitives/blockchain/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" log = "0.4.17" parking_lot = "0.12.1" schnellru = "0.2.1" -thiserror = "1.0.30" +thiserror = "1.0.48" sp-api = { path = "../api" } sp-consensus = { path = "../consensus/common" } sp-database = { path = "../database" } diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 284e00b272e6..e8f6b806f8c6 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.57" futures = { version = "0.3.21", features = ["thread-pool"] } log = "0.4.17" -thiserror = "1.0.30" +thiserror = "1.0.48" sp-core = { path = "../../core" } sp-inherents = { path = "../../inherents" } sp-runtime = { path = "../../runtime" } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index efcaad1a6f61..4e5186f16878 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -39,7 +39,7 @@ sp-storage = { path = "../storage", default-features = false} sp-externalities = { path = "../externalities", optional = true} futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } -thiserror = { version = "1.0.30", optional = true } +thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } bitflags = "1.3" paste = "1.0.7" diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index d3ac94aa5fb3..aa0aa95b3f8d 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -18,7 +18,7 @@ async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" -thiserror = { version = "1.0.30", optional = true } +thiserror = { version = "1.0.48", optional = true } sp-runtime = { path = "../runtime", default-features = false, optional = true} sp-std = { path = "../std", default-features = false} diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 3ab21308c3c8..8546345e5cae 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -20,7 +20,7 @@ log = { version = "0.4.17", default-features = false } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8.5", optional = true } smallvec = "1.11.0" -thiserror = { version = "1.0.30", optional = true } +thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } sp-core = { path = "../core", default-features = false} sp-externalities = { path = "../externalities", default-features = false} diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 7de2a7d904d0..44b0fdd831c0 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -thiserror = { version = "1.0.30", optional = 
true } +thiserror = { version = "1.0.48", optional = true } sp-inherents = { path = "../inherents", default-features = false} sp-runtime = { path = "../runtime", default-features = false} sp-std = { path = "../std", default-features = false} diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 31eb009328c1..0bce597414b1 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -27,7 +27,7 @@ memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -thiserror = { version = "1.0.30", optional = true } +thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.27.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 3002566f74ff..1ab51a08bbe3 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -19,7 +19,7 @@ impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } -thiserror = { version = "1.0.30", optional = true } +thiserror = { version = "1.0.48", optional = true } sp-core-hashing-proc-macro = { path = "../core/hashing/proc-macro" } sp-runtime = { path = "../runtime", default-features = false} sp-std = { path = "../std", default-features = false} diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 096e4be82d4b..85be451fa60c 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -28,7 +28,7 @@ rand = { version = "0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" serde = "1.0.188" serde_json = "1.0.85" -thiserror = "1.0.30" +thiserror = "1.0.48" thousands = "0.2.0" frame-benchmarking = { path = "../../../frame/benchmarking" } frame-support = { path = "../../../frame/support" } From adf847a582feac6bb654993e75331d63c109357d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 11:47:08 +0300 Subject: [PATCH 11/28] Bump actions/checkout from 3 to 4 (#1398) * Bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update fmt-check.yml * Update .github/workflows/check-licenses.yml Co-authored-by: Chevdor * Update .github/workflows/check-markdown.yml Co-authored-by: Chevdor * Update .github/workflows/fmt-check.yml Co-authored-by: Chevdor --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sergejs Kostjucenko <85877331+sergejparity@users.noreply.github.com> Co-authored-by: Chevdor --- .github/workflows/check-licenses.yml | 2 +- .github/workflows/check-markdown.yml | 2 +- .github/workflows/fmt-check.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 3da699a354f6..4d0afefc47aa 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -14,7 +14,7 @@ jobs: NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - uses: actions/setup-node@v3.8.1 with: node-version: "18.x" diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index b386fd6d1b1e..f1e46ca27351 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - uses: actions/setup-node@v3.8.1 with: diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index fd4b72061b92..df785404036e 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -16,7 +16,7 @@ jobs: container: image: paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Cargo fmt run: cargo +nightly fmt --all -- --check From 12194445b222ec4a31ef247d1bced9ba127d7d52 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 5 Sep 2023 12:34:50 +0300 Subject: [PATCH 12/28] Get rid of polling in `WarpSync` (#1265) --- polkadot/node/service/src/lib.rs | 2 +- substrate/bin/node/cli/src/service.rs | 3 +- .../client/network/common/src/sync/warp.rs | 13 +- substrate/client/network/sync/src/engine.rs | 118 ++++-- substrate/client/network/sync/src/lib.rs | 335 ++++++++++-------- substrate/client/network/sync/src/warp.rs | 95 +++-- substrate/client/network/test/src/lib.rs | 5 +- substrate/client/service/src/builder.rs | 5 +- substrate/client/service/src/lib.rs | 2 +- 9 files changed, 344 insertions(+), 234 deletions(-) diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 95c887947c98..7f4eadaba7f8 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -732,7 +732,7 @@ pub fn new_full( }: NewFullParams, ) -> Result { use polkadot_node_network_protocol::request_response::IncomingRequest; - use sc_network_common::sync::warp::WarpSyncParams; + use sc_network_sync::warp::WarpSyncParams; let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; let role = config.role.clone(); diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index ecca5c60db51..e49c60fe2fb7 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -32,8 
+32,7 @@ use sc_client_api::{Backend, BlockBackend}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; use sc_network::{event::Event, NetworkEventStream, NetworkService}; -use sc_network_common::sync::warp::WarpSyncParams; -use sc_network_sync::SyncingService; +use sc_network_sync::{warp::WarpSyncParams, SyncingService}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_statement_store::Store as StatementStore; use sc_telemetry::{Telemetry, TelemetryWorker}; diff --git a/substrate/client/network/common/src/sync/warp.rs b/substrate/client/network/common/src/sync/warp.rs index 91d6c4151a42..f4e39f438512 100644 --- a/substrate/client/network/common/src/sync/warp.rs +++ b/substrate/client/network/common/src/sync/warp.rs @@ -15,10 +15,9 @@ // along with Substrate. If not, see . use codec::{Decode, Encode}; -use futures::channel::oneshot; pub use sp_consensus_grandpa::{AuthorityList, SetId}; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt, sync::Arc}; +use std::fmt; /// Scale-encoded warp sync proof response. pub struct EncodedProof(pub Vec); @@ -30,16 +29,6 @@ pub struct WarpProofRequest { pub begin: B::Hash, } -/// The different types of warp syncing. -pub enum WarpSyncParams { - /// Standard warp sync for the chain. - WithProvider(Arc>), - /// Skip downloading proofs and wait for a header of the state that should be downloaded. - /// - /// It is expected that the header provider ensures that the header is trusted. - WaitForTarget(oneshot::Receiver<::Header>), -} - /// Proof verification result. pub enum VerificationResult { /// Proof is valid, but the target was not reached. diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 9b97bf2b7c34..23847d16c972 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -24,11 +24,16 @@ use crate::{ BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, }, service::{self, chain_sync::ToServiceCommand}, + warp::WarpSyncParams, ChainSync, ClientError, SyncingService, }; use codec::{Decode, Encode}; -use futures::{FutureExt, StreamExt}; +use futures::{ + channel::oneshot, + future::{BoxFuture, Fuse}, + FutureExt, StreamExt, +}; use futures_timer::Delay; use libp2p::PeerId; use prometheus_endpoint::{ @@ -47,7 +52,6 @@ use sc_network_common::{ role::Roles, sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, - warp::WarpSyncParams, BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, SyncEvent, }, }; @@ -67,6 +71,9 @@ use std::{ time::{Duration, Instant}, }; +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + /// Interval at which we perform time based maintenance const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100); @@ -251,6 +258,10 @@ pub struct SyncingEngine { /// The `PeerId`'s of all boot nodes. boot_node_ids: HashSet, + /// A channel to get target block header if we skip over proofs downloading during warp sync. 
+ warp_sync_target_block_header_rx: + Fuse>>, + /// Protocol name used for block announcements block_announce_protocol_name: ProtocolName, @@ -299,7 +310,11 @@ where let max_blocks_per_request = if net_config.network_config.max_blocks_per_request > crate::MAX_BLOCKS_IN_RESPONSE as u32 { - log::info!(target: "sync", "clamping maximum blocks per request to {}", crate::MAX_BLOCKS_IN_RESPONSE); + log::info!( + target: LOG_TARGET, + "clamping maximum blocks per request to {}", + crate::MAX_BLOCKS_IN_RESPONSE, + ); crate::MAX_BLOCKS_IN_RESPONSE as u32 } else { net_config.network_config.max_blocks_per_request @@ -352,6 +367,19 @@ where total.saturating_sub(net_config.network_config.default_peers_set_num_full) as usize }; + // Split warp sync params into warp sync config and a channel to retreive target block + // header. + let (warp_sync_config, warp_sync_target_block_header_rx) = + warp_sync_params.map_or((None, None), |params| { + let (config, target_block_rx) = params.split(); + (Some(config), target_block_rx) + }); + + // Make sure polling of the target block channel is a no-op if there is no block to + // retrieve. + let warp_sync_target_block_header_rx = warp_sync_target_block_header_rx + .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); + let (chain_sync, block_announce_config) = ChainSync::new( mode, client.clone(), @@ -360,7 +388,7 @@ where roles, max_parallel_downloads, max_blocks_per_request, - warp_sync_params, + warp_sync_config, metrics_registry, network_service.clone(), import_queue, @@ -404,6 +432,7 @@ where genesis_hash, important_peers, default_peers_set_no_slot_connected_peers: HashSet::new(), + warp_sync_target_block_header_rx, boot_node_ids, default_peers_set_no_slot_peers, default_peers_set_num_full, @@ -418,7 +447,7 @@ where match Metrics::register(r, is_major_syncing.clone()) { Ok(metrics) => Some(metrics), Err(err) => { - log::error!(target: "sync", "Failed to register metrics {err:?}"); + log::error!(target: LOG_TARGET, "Failed to register metrics {err:?}"); None }, } @@ -510,7 +539,10 @@ where let peer = match self.peers.get_mut(&peer_id) { Some(p) => p, None => { - log::error!(target: "sync", "Received block announce from disconnected peer {}", peer_id); + log::error!( + target: LOG_TARGET, + "Received block announce from disconnected peer {peer_id}", + ); debug_assert!(false); return }, @@ -536,11 +568,11 @@ where let header = match self.client.header(hash) { Ok(Some(header)) => header, Ok(None) => { - log::warn!(target: "sync", "Trying to announce unknown block: {}", hash); + log::warn!(target: LOG_TARGET, "Trying to announce unknown block: {hash}"); return }, Err(e) => { - log::warn!(target: "sync", "Error reading block header {}: {}", hash, e); + log::warn!(target: LOG_TARGET, "Error reading block header {hash}: {e}"); return }, }; @@ -551,7 +583,7 @@ where } let is_best = self.client.info().best_hash == hash; - log::debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); + log::debug!(target: LOG_TARGET, "Reannouncing block {hash:?} is_best: {is_best}"); let data = data .or_else(|| self.block_announce_data_cache.get(&hash).cloned()) @@ -560,7 +592,7 @@ where for (peer_id, ref mut peer) in self.peers.iter_mut() { let inserted = peer.known_blocks.insert(hash); if inserted { - log::trace!(target: "sync", "Announcing block {:?} to {}", hash, peer_id); + log::trace!(target: LOG_TARGET, "Announcing block {hash:?} to {peer_id}"); let message = BlockAnnounce { header: header.clone(), state: if is_best { 
Some(BlockState::Best) } else { Some(BlockState::Normal) }, @@ -575,7 +607,7 @@ where /// Inform sync about new best imported block. pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); + log::debug!(target: LOG_TARGET, "New best block imported {hash:?}/#{number}"); self.chain_sync.update_chain_info(&hash, number); self.network_service.set_notification_handshake( @@ -619,7 +651,10 @@ where // consider it connected or are also all stalled. In order to unstall the node, // disconnect all peers and allow `ProtocolController` to establish new connections. if self.last_notification_io.elapsed() > INACTIVITY_EVICT_THRESHOLD { - log::debug!(target: "sync", "syncing has halted due to inactivity, evicting all peers"); + log::debug!( + target: LOG_TARGET, + "syncing has halted due to inactivity, evicting all peers", + ); for peer in self.peers.keys() { self.network_service.report_peer(*peer, rep::INACTIVE_SUBSTREAM); @@ -658,7 +693,10 @@ where ToServiceCommand::JustificationImported(peer_id, hash, number, success) => { self.chain_sync.on_justification_import(hash, number, success); if !success { - log::info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer_id, hash); + log::info!( + target: LOG_TARGET, + "💔 Invalid justification provided by {peer_id} for #{hash}", + ); self.network_service .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); self.network_service.report_peer( @@ -723,7 +761,7 @@ where }, Err(()) => { log::debug!( - target: "sync", + target: LOG_TARGET, "Failed to register peer {remote:?}: {received_handshake:?}", ); let _ = tx.send(false); @@ -732,7 +770,7 @@ where sc_network::SyncEvent::NotificationStreamClosed { remote } => { if self.on_sync_peer_disconnected(remote).is_err() { log::trace!( - target: "sync", + target: LOG_TARGET, "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", remote ); @@ -749,9 +787,8 @@ where } } else { log::trace!( - target: "sync", - "Received sync for peer earlier refused by sync layer: {}", - remote + target: LOG_TARGET, + "Received sync for peer earlier refused by sync layer: {remote}", ); } } @@ -764,6 +801,21 @@ where } } + // Retreive warp sync target block header just before polling `ChainSync` + // to make progress as soon as we receive it. + match self.warp_sync_target_block_header_rx.poll_unpin(cx) { + Poll::Ready(Ok(target)) => { + self.chain_sync.set_warp_sync_target_block(target); + }, + Poll::Ready(Err(err)) => { + log::error!( + target: LOG_TARGET, + "Failed to get target block for warp sync. Error: {err:?}", + ); + }, + Poll::Pending => {}, + } + // Drive `ChainSync`. 
while let Poll::Ready(()) = self.chain_sync.poll(cx) {} @@ -784,9 +836,9 @@ where pub fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> { if let Some(info) = self.peers.remove(&peer_id) { if self.important_peers.contains(&peer_id) { - log::warn!(target: "sync", "Reserved peer {} disconnected", peer_id); + log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected"); } else { - log::debug!(target: "sync", "{} disconnected", peer_id); + log::debug!(target: LOG_TARGET, "{peer_id} disconnected"); } if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && @@ -798,7 +850,7 @@ where }, None => { log::error!( - target: "sync", + target: LOG_TARGET, "trying to disconnect an inbound node which is not counted as inbound" ); debug_assert!(false); @@ -828,10 +880,13 @@ where sink: NotificationsSink, inbound: bool, ) -> Result<(), ()> { - log::trace!(target: "sync", "New peer {} {:?}", peer_id, status); + log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}"); if self.peers.contains_key(&peer_id) { - log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", peer_id); + log::error!( + target: LOG_TARGET, + "Called on_sync_peer_connected with already connected peer {peer_id}", + ); debug_assert!(false); return Err(()) } @@ -841,7 +896,7 @@ where if self.important_peers.contains(&peer_id) { log::error!( - target: "sync", + target: LOG_TARGET, "Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})", peer_id, self.genesis_hash, @@ -849,7 +904,7 @@ where ); } else if self.boot_node_ids.contains(&peer_id) { log::error!( - target: "sync", + target: LOG_TARGET, "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", peer_id, self.genesis_hash, @@ -857,7 +912,7 @@ where ); } else { log::debug!( - target: "sync", + target: LOG_TARGET, "Peer is on different chain (our genesis: {} theirs: {})", self.genesis_hash, status.genesis_hash ); @@ -874,7 +929,10 @@ where status.roles.is_full() && inbound && self.num_in_peers == self.max_in_peers { - log::debug!(target: "sync", "All inbound slots have been consumed, rejecting {peer_id}"); + log::debug!( + target: LOG_TARGET, + "All inbound slots have been consumed, rejecting {peer_id}", + ); return Err(()) } @@ -884,7 +942,7 @@ where self.default_peers_set_no_slot_connected_peers.len() + this_peer_reserved_slot { - log::debug!(target: "sync", "Too many full nodes, rejecting {}", peer_id); + log::debug!(target: LOG_TARGET, "Too many full nodes, rejecting {peer_id}"); return Err(()) } @@ -892,7 +950,7 @@ where (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. 
- log::debug!(target: "sync", "Too many light nodes, rejecting {}", peer_id); + log::debug!(target: LOG_TARGET, "Too many light nodes, rejecting {peer_id}"); return Err(()) } @@ -921,7 +979,7 @@ where None }; - log::debug!(target: "sync", "Connected {}", peer_id); + log::debug!(target: LOG_TARGET, "Connected {peer_id}"); self.peers.insert(peer_id, peer); diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 0c2013b14977..df0ed2c45410 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -32,7 +32,7 @@ use crate::{ blocks::BlockCollection, schema::v1::{StateRequest, StateResponse}, state::StateSync, - warp::{WarpProofImportResult, WarpSync}, + warp::{WarpProofImportResult, WarpSync, WarpSyncConfig}, }; use codec::{Decode, DecodeAll, Encode}; @@ -61,7 +61,7 @@ use sc_network_common::{ BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock, }, - warp::{EncodedProof, WarpProofRequest, WarpSyncParams, WarpSyncPhase, WarpSyncProgress}, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, BadPeer, ChainSync as ChainSyncT, ImportResult, Metrics, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, PeerRequest, SyncMode, SyncState, SyncStatus, @@ -103,6 +103,9 @@ pub mod state_request_handler; pub mod warp; pub mod warp_request_handler; +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + /// Maximum blocks to store in the import queue. const MAX_IMPORTING_BLOCKS: usize = 2048; @@ -302,10 +305,12 @@ pub struct ChainSync { state_sync: Option>, /// Warp sync in progress, if any. warp_sync: Option>, - /// Warp sync params. + /// Warp sync configuration. /// /// Will be `None` after `self.warp_sync` is `Some(_)`. - warp_sync_params: Option>, + warp_sync_config: Option>, + /// A temporary storage for warp sync target block until warp sync is initialized. + warp_sync_target_block_header: Option, /// Enable importing existing blocks. This is used used after the state download to /// catch up to the latest state while re-importing blocks. import_existing: bool, @@ -351,7 +356,7 @@ impl PeerSync { fn update_common_number(&mut self, new_common: NumberFor) { if self.common_number < new_common { trace!( - target: "sync", + target: LOG_TARGET, "Updating peer {} common number from={} => to={}.", self.peer_id, self.common_number, @@ -497,7 +502,7 @@ where // There is nothing sync can get from the node that has no blockchain data. match self.block_status(&best_hash) { Err(e) => { - debug!(target:"sync", "Error reading blockchain: {}", e); + debug!(target:LOG_TARGET, "Error reading blockchain: {e}"); Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) }, Ok(BlockStatus::KnownBad) => { @@ -515,7 +520,7 @@ where // an ancestor search, which is what we do in the next match case below. if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { debug!( - target:"sync", + target:LOG_TARGET, "New peer with unknown best hash {} ({}), assuming common block.", self.best_queued_hash, self.best_queued_number @@ -536,10 +541,8 @@ where // If we are at genesis, just start downloading. 
let (state, req) = if self.best_queued_number.is_zero() { debug!( - target:"sync", - "New peer with best hash {} ({}).", - best_hash, - best_number, + target:LOG_TARGET, + "New peer with best hash {best_hash} ({best_number}).", ); (PeerSyncState::Available, None) @@ -547,7 +550,7 @@ where let common_best = std::cmp::min(self.best_queued_number, best_number); debug!( - target:"sync", + target:LOG_TARGET, "New peer with unknown best hash {} ({}), searching for common ancestor.", best_hash, best_number @@ -578,9 +581,14 @@ where if let SyncMode::Warp = self.mode { if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() { - log::debug!(target: "sync", "Starting warp state sync."); - if let Some(params) = self.warp_sync_params.take() { - self.warp_sync = Some(WarpSync::new(self.client.clone(), params)); + log::debug!(target: LOG_TARGET, "Starting warp state sync."); + + if let Some(config) = self.warp_sync_config.take() { + let mut warp_sync = WarpSync::new(self.client.clone(), config); + if let Some(header) = self.warp_sync_target_block_header.take() { + warp_sync.set_target_block(header); + } + self.warp_sync = Some(warp_sync); } } } @@ -590,10 +598,8 @@ where Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { debug!( - target: "sync", - "New peer with known best hash {} ({}).", - best_hash, - best_number, + target: LOG_TARGET, + "New peer with known best hash {best_hash} ({best_number}).", ); self.peers.insert( who, @@ -642,21 +648,23 @@ where .collect(); debug!( - target: "sync", - "Explicit sync request for block {:?} with no peers specified. \ - Syncing from these peers {:?} instead.", - hash, peers, + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with no peers specified. \ + Syncing from these peers {peers:?} instead.", ); } else { - debug!(target: "sync", "Explicit sync request for block {:?} with {:?}", hash, peers); + debug!( + target: LOG_TARGET, + "Explicit sync request for block {hash:?} with {peers:?}", + ); } if self.is_known(hash) { - debug!(target: "sync", "Refusing to sync known hash {:?}", hash); + debug!(target: LOG_TARGET, "Refusing to sync known hash {hash:?}"); return } - trace!(target: "sync", "Downloading requested old fork {:?}", hash); + trace!(target: LOG_TARGET, "Downloading requested old fork {hash:?}"); for peer_id in &peers { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::AncestorSearch { .. 
} = peer.state { @@ -689,7 +697,7 @@ where let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { let mut blocks = response.blocks; if request.as_ref().map_or(false, |r| r.direction == Direction::Descending) { - trace!(target: "sync", "Reversing incoming block list"); + trace!(target: LOG_TARGET, "Reversing incoming block list"); blocks.reverse() } self.allowed_requests.add(who); @@ -740,17 +748,22 @@ where } }) .collect(); - debug!(target: "sync", "Drained {} gap blocks from {}", blocks.len(), gap_sync.best_queued_number); + debug!( + target: LOG_TARGET, + "Drained {} gap blocks from {}", + blocks.len(), + gap_sync.best_queued_number, + ); blocks } else { - debug!(target: "sync", "Unexpected gap block response from {}", who); + debug!(target: LOG_TARGET, "Unexpected gap block response from {who}"); return Err(BadPeer(*who, rep::NO_BLOCK)) } }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; if blocks.is_empty() { - debug!(target: "sync", "Empty block response from {}", who); + debug!(target: LOG_TARGET, "Empty block response from {who}"); return Err(BadPeer(*who, rep::NO_BLOCK)) } validate_blocks::(&blocks, who, Some(request))?; @@ -779,7 +792,7 @@ where let matching_hash = match (blocks.get(0), self.client.hash(*current)) { (Some(block), Ok(maybe_our_block_hash)) => { trace!( - target: "sync", + target: LOG_TARGET, "Got ancestry block #{} ({}) from peer {}", current, block.hash, @@ -789,17 +802,15 @@ where }, (None, _) => { debug!( - target: "sync", - "Invalid response when searching for ancestor from {}", - who, + target: LOG_TARGET, + "Invalid response when searching for ancestor from {who}", ); return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) }, (_, Err(e)) => { info!( - target: "sync", - "❌ Error answering legitimate blockchain query: {}", - e, + target: LOG_TARGET, + "❌ Error answering legitimate blockchain query: {e}", ); return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) }, @@ -817,7 +828,10 @@ where } } if matching_hash.is_none() && current.is_zero() { - trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); + trace!( + target:LOG_TARGET, + "Ancestry search: genesis mismatch for peer {who}", + ); return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) } if let Some((next_state, next_num)) = @@ -833,7 +847,7 @@ where // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. trace!( - target: "sync", + target: LOG_TARGET, "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", self.best_queued_hash, self.best_queued_number, @@ -846,7 +860,7 @@ where peer.best_number < self.best_queued_number { trace!( - target: "sync", + target: LOG_TARGET, "Added fork target {} for {}", peer.best_hash, who, @@ -879,11 +893,11 @@ where return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), } } else if blocks.is_empty() { - debug!(target: "sync", "Empty block response from {}", who); + debug!(target: LOG_TARGET, "Empty block response from {who}"); return Err(BadPeer(*who, rep::NO_BLOCK)) } else { debug!( - target: "sync", + target: LOG_TARGET, "Too many blocks ({}) in warp target block response from {}", blocks.len(), who, @@ -892,7 +906,7 @@ where } } else { debug!( - target: "sync", + target: LOG_TARGET, "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", who, ); @@ -944,7 +958,10 @@ where let peer = if let Some(peer) = self.peers.get_mut(&who) { peer } else { - error!(target: "sync", "💔 Called on_block_justification with a peer ID of an unknown peer"); + error!( + target: LOG_TARGET, + "💔 Called on_block_justification with a peer ID of an unknown peer", + ); return Ok(OnBlockJustification::Nothing) }; @@ -956,7 +973,7 @@ where let justification = if let Some(block) = response.blocks.into_iter().next() { if hash != block.hash { warn!( - target: "sync", + target: LOG_TARGET, "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, @@ -972,10 +989,8 @@ where // we might have asked the peer for a justification on a block that we assumed it // had but didn't (regardless of whether it had a justification for it or not). trace!( - target: "sync", - "Peer {:?} provided empty response for justification request {:?}", - who, - hash, + target: LOG_TARGET, + "Peer {who:?} provided empty response for justification request {hash:?}", ); None @@ -1013,10 +1028,8 @@ where if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { if let Ok(Some(header)) = self.client.header(*hash) { log::debug!( - target: "sync", - "Starting state sync for #{} ({})", - number, - hash, + target: LOG_TARGET, + "Starting state sync for #{number} ({hash})", ); self.state_sync = Some(StateSync::new( self.client.clone(), @@ -1033,9 +1046,8 @@ where if let Err(err) = r { warn!( - target: "sync", - "💔 Error cleaning up pending extra justification data requests: {}", - err, + target: LOG_TARGET, + "💔 Error cleaning up pending extra justification data requests: {err}", ); } } @@ -1057,12 +1069,12 @@ where let peer = if let Some(peer) = self.peers.get_mut(&who) { peer } else { - error!(target: "sync", "💔 Called `on_validated_block_announce` with a bad peer ID"); + error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID"); return }; if let PeerSyncState::AncestorSearch { .. } = peer.state { - trace!(target: "sync", "Peer {} is in the ancestor search state.", who); + trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", who); return } @@ -1222,12 +1234,6 @@ where } fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()> { - // Should be called before `process_outbound_requests` to ensure - // that a potential target block is directly leading to requests. 
- if let Some(warp_sync) = &mut self.warp_sync { - let _ = warp_sync.poll(cx); - } - self.process_outbound_requests(); while let Poll::Ready(result) = self.poll_pending_responses(cx) { @@ -1262,9 +1268,8 @@ where }, Err(err) => { log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - opaque_req, err + target: LOG_TARGET, + "Failed to encode block request {opaque_req:?}: {err:?}", ); }, } @@ -1292,7 +1297,7 @@ where roles: Roles, max_parallel_downloads: u32, max_blocks_per_request: u32, - warp_sync_params: Option>, + warp_sync_config: Option>, metrics_registry: Option<&Registry>, network_service: service::network::NetworkServiceHandle, import_queue: Box>, @@ -1334,7 +1339,8 @@ where network_service, block_request_protocol_name, state_request_protocol_name, - warp_sync_params, + warp_sync_config, + warp_sync_target_block_header: None, warp_sync_protocol_name, block_announce_protocol_name: block_announce_config .notifications_protocol @@ -1346,7 +1352,10 @@ where match SyncingMetrics::register(r) { Ok(metrics) => Some(metrics), Err(err) => { - error!(target: "sync", "Failed to register metrics for ChainSync: {err:?}"); + error!( + target: LOG_TARGET, + "Failed to register metrics for ChainSync: {err:?}", + ); None }, } @@ -1403,7 +1412,7 @@ where new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { debug!( - target: "sync", + target: LOG_TARGET, "Ignoring {} blocks that are already queued", orig_len - new_blocks.len(), ); @@ -1420,7 +1429,7 @@ where .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { trace!( - target:"sync", + target:LOG_TARGET, "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, @@ -1444,7 +1453,7 @@ where /// through all peers to update our view of their state as well. fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { if self.fork_targets.remove(hash).is_some() { - trace!(target: "sync", "Completed fork sync {:?}", hash); + trace!(target: LOG_TARGET, "Completed fork sync {hash:?}"); } if let Some(gap_sync) = &mut self.gap_sync { if number > gap_sync.best_queued_number && number <= gap_sync.target { @@ -1463,7 +1472,7 @@ where let new_common_number = if peer.best_number >= number { number } else { peer.best_number }; trace!( - target: "sync", + target: LOG_TARGET, "Updating peer {} info, ours={}, common={}->{}, their best={}", n, number, @@ -1483,10 +1492,15 @@ where fn restart(&mut self) -> impl Iterator), BadPeer>> + '_ { self.blocks.clear(); if let Err(e) = self.reset_sync_start_point() { - warn!(target: "sync", "💔 Unable to restart sync: {}", e); + warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); } self.allowed_requests.set_all(); - debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); + debug!( + target: LOG_TARGET, + "Restarted with {} ({})", + self.best_queued_number, + self.best_queued_hash, + ); let old_peers = std::mem::take(&mut self.peers); old_peers.into_iter().filter_map(move |(id, mut p)| { @@ -1517,14 +1531,14 @@ where let info = self.client.info(); if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() { warn!( - target: "sync", + target: LOG_TARGET, "Can't use fast sync mode with a partially synced database. Reverting to full sync mode." ); self.mode = SyncMode::Full; } if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() { warn!( - target: "sync", + target: LOG_TARGET, "Can't use warp sync mode with a partially synced database. 
Reverting to full sync mode." ); self.mode = SyncMode::Full; @@ -1539,25 +1553,30 @@ where self.import_existing = true; // Latest state is missing, start with the last finalized state or genesis instead. if let Some((hash, number)) = info.finalized_state { - debug!(target: "sync", "Starting from finalized state #{}", number); + debug!(target: LOG_TARGET, "Starting from finalized state #{number}"); self.best_queued_hash = hash; self.best_queued_number = number; } else { - debug!(target: "sync", "Restarting from genesis"); + debug!(target: LOG_TARGET, "Restarting from genesis"); self.best_queued_hash = Default::default(); self.best_queued_number = Zero::zero(); } } if let Some((start, end)) = info.block_gap { - debug!(target: "sync", "Starting gap sync #{} - #{}", start, end); + debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}"); self.gap_sync = Some(GapSync { best_queued_number: start - One::one(), target: end, blocks: BlockCollection::new(), }); } - trace!(target: "sync", "Restarted sync at #{} ({:?})", self.best_queued_number, self.best_queued_hash); + trace!( + target: LOG_TARGET, + "Restarted sync at #{} ({:?})", + self.best_queued_number, + self.best_queued_hash, + ); Ok(()) } @@ -1607,6 +1626,15 @@ where .collect() } + /// Set warp sync target block externally in case we skip warp proof downloading. + pub fn set_warp_sync_target_block(&mut self, header: B::Header) { + if let Some(ref mut warp_sync) = self.warp_sync { + warp_sync.set_target_block(header); + } else { + self.warp_sync_target_block_header = Some(header); + } + } + /// Generate block request for downloading of the target block body during warp sync. fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { let sync = &self.warp_sync.as_ref()?; @@ -1625,7 +1653,7 @@ where // Find a random peer that has a block with the target number. 
for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.best_number >= target_number { - trace!(target: "sync", "New warp target block request for {}", id); + trace!(target: LOG_TARGET, "New warp target block request for {id}"); peer.state = PeerSyncState::DownloadingWarpTargetBlock; self.allowed_requests.clear(); return Some((*id, request)) @@ -1716,9 +1744,8 @@ where }, Err(err) => { log::warn!( - target: "sync", - "Failed to encode state request {:?}: {:?}", - request, err + target: LOG_TARGET, + "Failed to encode state request {request:?}: {err:?}", ); }, } @@ -1742,9 +1769,8 @@ where ), None => { log::warn!( - target: "sync", - "Trying to send warp sync request when no protocol is configured {:?}", - request, + target: LOG_TARGET, + "Trying to send warp sync request when no protocol is configured {request:?}", ); }, } @@ -1759,7 +1785,12 @@ where let blocks = match self.block_response_into_blocks(&request, response) { Ok(blocks) => blocks, Err(err) => { - debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); + debug!( + target: LOG_TARGET, + "Failed to decode block response from {}: {}", + peer_id, + err, + ); self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); return None }, @@ -1779,7 +1810,7 @@ where _ => Default::default(), }; trace!( - target: "sync", + target: LOG_TARGET, "BlockResponse {} from {} with {} blocks {}", block_response.id, peer_id, @@ -1888,10 +1919,8 @@ where Ok(proto) => proto, Err(e) => { debug!( - target: "sync", - "Failed to decode block response from peer {:?}: {:?}.", - id, - e + target: LOG_TARGET, + "Failed to decode block response from peer {id:?}: {e:?}.", ); self.network_service.report_peer(id, rep::BAD_MESSAGE); self.network_service @@ -1909,10 +1938,8 @@ where Ok(proto) => proto, Err(e) => { debug!( - target: "sync", - "Failed to decode state response from peer {:?}: {:?}.", - id, - e + target: LOG_TARGET, + "Failed to decode state response from peer {id:?}: {e:?}.", ); self.network_service.report_peer(id, rep::BAD_MESSAGE); self.network_service @@ -1930,7 +1957,7 @@ where }, }, Ok(Err(e)) => { - debug!(target: "sync", "Request to peer {:?} failed: {:?}.", id, e); + debug!(target: LOG_TARGET, "Request to peer {id:?} failed: {e:?}."); match e { RequestFailure::Network(OutboundFailure::Timeout) => { @@ -1971,9 +1998,8 @@ where }, Err(oneshot::Canceled) => { trace!( - target: "sync", - "Request to peer {:?} failed due to oneshot being canceled.", - id, + target: LOG_TARGET, + "Request to peer {id:?} failed due to oneshot being canceled.", ); self.network_service .disconnect_peer(id, self.block_announce_protocol_name.clone()); @@ -2058,7 +2084,7 @@ where } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { - trace!(target: "sync", "Too many blocks in the queue."); + trace!(target: LOG_TARGET, "Too many blocks in the queue."); return Vec::new() } let is_major_syncing = self.status().state.is_major_syncing(); @@ -2093,7 +2119,7 @@ where queue.len() <= MAJOR_SYNC_BLOCKS.into() { trace!( - target: "sync", + target: LOG_TARGET, "Peer {:?} common block {} too far behind of our best {}. 
Starting ancestry search.", id, peer.common_number, @@ -2118,7 +2144,7 @@ where ) { peer.state = PeerSyncState::DownloadingNew(range.start); trace!( - target: "sync", + target: LOG_TARGET, "New block request for {}, (best:{}, common:{}) {:?}", id, peer.best_number, @@ -2141,7 +2167,7 @@ where }, max_blocks_per_request, ) { - trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); + trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}"); peer.state = PeerSyncState::DownloadingStale(hash); Some((id, req)) } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { @@ -2157,7 +2183,7 @@ where }) { peer.state = PeerSyncState::DownloadingGap(range.start); trace!( - target: "sync", + target: LOG_TARGET, "New gap block request for {}, (best:{}, common:{}) {:?}", id, peer.best_number, @@ -2192,7 +2218,7 @@ where if peer.state.is_available() && peer.common_number >= sync.target_block_num() { peer.state = PeerSyncState::DownloadingState; let request = sync.next_request(); - trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); + trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); return Some((*id, OpaqueStateRequest(Box::new(request)))) } @@ -2207,7 +2233,7 @@ where { for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.best_number >= target { - trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); + trace!(target: LOG_TARGET, "New StateRequest for {id}: {request:?}"); peer.state = PeerSyncState::DownloadingState; self.allowed_requests.clear(); return Some((*id, OpaqueStateRequest(Box::new(request)))) @@ -2237,7 +2263,7 @@ where // Find a random peer that is synced as much as peer majority. for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.best_number >= median { - trace!(target: "sync", "New WarpProofRequest for {}", id); + trace!(target: LOG_TARGET, "New WarpProofRequest for {id}"); peer.state = PeerSyncState::DownloadingWarpProof; self.allowed_requests.clear(); return Some((*id, request)) @@ -2256,7 +2282,7 @@ where ) -> Result, BadPeer> { let response: Box = response.0.downcast().map_err(|_error| { error!( - target: "sync", + target: LOG_TARGET, "Failed to downcast opaque state response, this is an implementation bug." ); @@ -2271,7 +2297,7 @@ where } let import_result = if let Some(sync) = &mut self.state_sync { debug!( - target: "sync", + target: LOG_TARGET, "Importing state data from {} with {} keys, {} proof nodes.", who, response.entries.len(), @@ -2280,7 +2306,7 @@ where sync.import(*response) } else if let Some(sync) = &mut self.warp_sync { debug!( - target: "sync", + target: LOG_TARGET, "Importing state data from {} with {} keys, {} proof nodes.", who, response.entries.len(), @@ -2288,7 +2314,7 @@ where ); sync.import_state(*response) } else { - debug!(target: "sync", "Ignored obsolete state response from {}", who); + debug!(target: LOG_TARGET, "Ignored obsolete state response from {who}"); return Err(BadPeer(*who, rep::NOT_REQUESTED)) }; @@ -2307,12 +2333,12 @@ where skip_execution: self.skip_execution(), state: Some(state), }; - debug!(target: "sync", "State download is complete. Import is queued"); + debug!(target: LOG_TARGET, "State download is complete. 
Import is queued"); Ok(OnStateData::Import(origin, block)) }, state::ImportResult::Continue => Ok(OnStateData::Continue), state::ImportResult::BadResponse => { - debug!(target: "sync", "Bad state data received from {}", who); + debug!(target: LOG_TARGET, "Bad state data received from {who}"); Err(BadPeer(*who, rep::BAD_BLOCK)) }, } @@ -2327,21 +2353,21 @@ where } let import_result = if let Some(sync) = &mut self.warp_sync { debug!( - target: "sync", + target: LOG_TARGET, "Importing warp proof data from {}, {} bytes.", who, response.0.len(), ); sync.import_warp_proof(response) } else { - debug!(target: "sync", "Ignored obsolete warp sync response from {}", who); + debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {who}"); return Err(BadPeer(*who, rep::NOT_REQUESTED)) }; match import_result { WarpProofImportResult::Success => Ok(()), WarpProofImportResult::BadResponse => { - debug!(target: "sync", "Bad proof data received from {}", who); + debug!(target: LOG_TARGET, "Bad proof data received from {who}"); Err(BadPeer(*who, rep::BAD_BLOCK)) }, } @@ -2379,7 +2405,7 @@ where count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, ) -> Box), BadPeer>>> { - trace!(target: "sync", "Imported {} of {}", imported, count); + trace!(target: LOG_TARGET, "Imported {imported} of {count}"); let mut output = Vec::new(); @@ -2406,7 +2432,7 @@ where Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( - target: "sync", + target: LOG_TARGET, "Block imported clears all pending justification requests {number}: {hash:?}", ); self.clear_justification_requests(); @@ -2414,7 +2440,7 @@ where if aux.needs_justification { trace!( - target: "sync", + target: LOG_TARGET, "Block imported but requires justification {number}: {hash:?}", ); self.request_justification(&hash, number); @@ -2434,7 +2460,7 @@ where self.state_sync.as_ref().map_or(false, |s| s.target() == hash); if state_sync_complete { info!( - target: "sync", + target: LOG_TARGET, "State sync is complete ({} MiB), restarting block sync.", self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), ); @@ -2448,7 +2474,7 @@ where .map_or(false, |s| s.target_block_hash() == Some(hash)); if warp_sync_complete { info!( - target: "sync", + target: LOG_TARGET, "Warp sync is complete ({} MiB), restarting block sync.", self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), ); @@ -2460,7 +2486,7 @@ where self.gap_sync.as_ref().map_or(false, |s| s.target == number); if gap_sync_complete { info!( - target: "sync", + target: LOG_TARGET, "Block history download is complete." 
); self.gap_sync = None; @@ -2469,7 +2495,7 @@ where Err(BlockImportError::IncompleteHeader(who)) => if let Some(peer) = who { warn!( - target: "sync", + target: LOG_TARGET, "💔 Peer sent block with incomplete header to import", ); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); @@ -2480,7 +2506,7 @@ where who.map_or_else(|| "".into(), |peer| format!(" received from ({peer})")); warn!( - target: "sync", + target: LOG_TARGET, "💔 Verification failed for block {hash:?}{extra_message}: {e:?}", ); @@ -2493,7 +2519,7 @@ where Err(BlockImportError::BadBlock(who)) => if let Some(peer) = who { warn!( - target: "sync", + target: LOG_TARGET, "💔 Block {hash:?} received from peer {peer} has been blacklisted", ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); @@ -2502,10 +2528,10 @@ where // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. // Don't mark it as bad as it still may be synced if explicitly requested. - trace!(target: "sync", "Obsolete block {hash:?}"); + trace!(target: LOG_TARGET, "Obsolete block {hash:?}"); }, e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { - warn!(target: "sync", "💔 Error importing block {hash:?}: {}", e.unwrap_err()); + warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); self.state_sync = None; self.warp_sync = None; output.extend(self.restart()); @@ -2625,7 +2651,7 @@ fn peer_block_request( return None } else if peer.common_number < finalized { trace!( - target: "sync", + target: LOG_TARGET, "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", id, peer.common_number, finalized, peer.best_number, best_num, ); @@ -2704,11 +2730,21 @@ fn fork_sync_request( ) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { if r.number <= finalized { - trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); + trace!( + target: LOG_TARGET, + "Removed expired fork sync request {:?} (#{})", + hash, + r.number, + ); return false } if check_block(hash) != BlockStatus::Unknown { - trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); + trace!( + target: LOG_TARGET, + "Removed obsolete fork sync request {:?} (#{})", + hash, + r.number, + ); return false } true @@ -2729,7 +2765,10 @@ fn fork_sync_request( // request only single block 1 }; - trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); + trace!( + target: LOG_TARGET, + "Downloading requested fork {hash:?} from {id}, {count} blocks", + ); return Some(( *hash, BlockRequest:: { @@ -2741,7 +2780,7 @@ fn fork_sync_request( }, )) } else { - trace!(target: "sync", "Fork too far in the future: {:?} (#{})", hash, r.number); + trace!(target: LOG_TARGET, "Fork too far in the future: {:?} (#{})", hash, r.number); } } None @@ -2778,7 +2817,7 @@ fn validate_blocks( if let Some(request) = request { if Some(blocks.len() as _) > request.max { debug!( - target: "sync", + target: LOG_TARGET, "Received more blocks than requested from {}. Expected in maximum {:?}, got {}.", who, request.max, @@ -2799,7 +2838,7 @@ fn validate_blocks( if !expected_block { debug!( - target: "sync", + target: LOG_TARGET, "Received block that was not requested. 
Requested {:?}, got {:?}.", request.from, block_header, @@ -2812,9 +2851,8 @@ fn validate_blocks( blocks.iter().any(|b| b.header.is_none()) { trace!( - target: "sync", - "Missing requested header for a block in response from {}.", - who, + target: LOG_TARGET, + "Missing requested header for a block in response from {who}.", ); return Err(BadPeer(*who, rep::BAD_RESPONSE)) @@ -2823,9 +2861,8 @@ fn validate_blocks( if request.fields.contains(BlockAttributes::BODY) && blocks.iter().any(|b| b.body.is_none()) { trace!( - target: "sync", - "Missing requested body for a block in response from {}.", - who, + target: LOG_TARGET, + "Missing requested body for a block in response from {who}.", ); return Err(BadPeer(*who, rep::BAD_RESPONSE)) @@ -2837,7 +2874,7 @@ fn validate_blocks( let hash = header.hash(); if hash != b.hash { debug!( - target:"sync", + target:LOG_TARGET, "Bad header received from {}. Expected hash {:?}, got {:?}", who, b.hash, @@ -2854,7 +2891,7 @@ fn validate_blocks( ); if expected != got { debug!( - target:"sync", + target:LOG_TARGET, "Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}", b.hash, who, @@ -3092,7 +3129,7 @@ mod test { ) -> BlockRequest { let requests = sync.block_requests(); - log::trace!(target: "sync", "Requests: {:?}", requests); + log::trace!(target: LOG_TARGET, "Requests: {requests:?}"); assert_eq!(1, requests.len()); assert_eq!(*peer, requests[0].0); @@ -3469,7 +3506,7 @@ mod test { break }; - log::trace!(target: "sync", "Request: {:?}", request); + log::trace!(target: LOG_TARGET, "Request: {request:?}"); } // Now request and import the fork. @@ -3611,7 +3648,7 @@ mod test { break }; - log::trace!(target: "sync", "Request: {:?}", request); + log::trace!(target: LOG_TARGET, "Request: {request:?}"); } // Now request and import the fork. diff --git a/substrate/client/network/sync/src/warp.rs b/substrate/client/network/sync/src/warp.rs index 912ad78dfdd0..74835a6e015e 100644 --- a/substrate/client/network/sync/src/warp.rs +++ b/substrate/client/network/sync/src/warp.rs @@ -19,36 +19,75 @@ //! Warp sync support. use crate::{ - oneshot, schema::v1::{StateRequest, StateResponse}, state::{ImportResult, StateSync}, }; -use futures::FutureExt; +use futures::channel::oneshot; use log::error; use sc_client_api::ProofProvider; use sc_network_common::sync::{ message::{BlockAttributes, BlockData, BlockRequest, Direction, FromBlock}, warp::{ - EncodedProof, VerificationResult, WarpProofRequest, WarpSyncParams, WarpSyncPhase, - WarpSyncProgress, WarpSyncProvider, + EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, + WarpSyncProvider, }, }; use sp_blockchain::HeaderBackend; use sp_consensus_grandpa::{AuthorityList, SetId}; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; -use std::{sync::Arc, task::Poll}; +use std::sync::Arc; +/// Log target for this file. +const LOG_TARGET: &'static str = "sync"; + +/// The different types of warp syncing, passed to `build_network`. +pub enum WarpSyncParams { + /// Standard warp sync for the chain. + WithProvider(Arc>), + /// Skip downloading proofs and wait for a header of the state that should be downloaded. + /// + /// It is expected that the header provider ensures that the header is trusted. + WaitForTarget(oneshot::Receiver<::Header>), +} + +/// Warp sync configuration as accepted by [`WarpSync`]. +pub enum WarpSyncConfig { + /// Standard warp sync for the chain. 
+ WithProvider(Arc>), + /// Skip downloading proofs and wait for a header of the state that should be downloaded. + /// + /// It is expected that the header provider ensures that the header is trusted. + WaitForTarget, +} + +impl WarpSyncParams { + /// Split `WarpSyncParams` into `WarpSyncConfig` and warp sync target block header receiver. + pub fn split( + self, + ) -> (WarpSyncConfig, Option::Header>>) { + match self { + WarpSyncParams::WithProvider(provider) => + (WarpSyncConfig::WithProvider(provider), None), + WarpSyncParams::WaitForTarget(rx) => (WarpSyncConfig::WaitForTarget, Some(rx)), + } + } +} + +/// Warp sync phase. enum Phase { + /// Downloading warp proofs. WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash, warp_sync_provider: Arc>, }, - PendingTargetBlock { - target_block: Option>, - }, + /// Waiting for target block to be set externally if we skip warp proofs downloading, + /// and start straight from the target block (used by parachains warp sync). + PendingTargetBlock, + /// Downloading target block. TargetBlock(B::Header), + /// Downloading state. State(StateSync), } @@ -83,10 +122,10 @@ where /// Create a new instance. When passing a warp sync provider we will be checking for proof and /// authorities. Alternatively we can pass a target block when we want to skip downloading /// proofs, in this case we will continue polling until the target block is known. - pub fn new(client: Arc, warp_sync_params: WarpSyncParams) -> Self { + pub fn new(client: Arc, warp_sync_config: WarpSyncConfig) -> Self { let last_hash = client.hash(Zero::zero()).unwrap().expect("Genesis header always exists"); - match warp_sync_params { - WarpSyncParams::WithProvider(warp_sync_provider) => { + match warp_sync_config { + WarpSyncConfig::WithProvider(warp_sync_provider) => { let phase = Phase::WarpProof { set_id: 0, authorities: warp_sync_provider.current_authorities(), @@ -95,35 +134,23 @@ where }; Self { client, phase, total_proof_bytes: 0 } }, - WarpSyncParams::WaitForTarget(block) => Self { - client, - phase: Phase::PendingTargetBlock { target_block: Some(block) }, - total_proof_bytes: 0, - }, + WarpSyncConfig::WaitForTarget => + Self { client, phase: Phase::PendingTargetBlock, total_proof_bytes: 0 }, } } - /// Poll to make progress. - /// - /// This only makes progress when `phase = Phase::PendingTargetBlock` and the pending block was - /// sent. - pub fn poll(&mut self, cx: &mut std::task::Context) { - let new_phase = if let Phase::PendingTargetBlock { target_block: Some(target_block) } = - &mut self.phase - { - match target_block.poll_unpin(cx) { - Poll::Ready(Ok(target)) => Phase::TargetBlock(target), - Poll::Ready(Err(e)) => { - error!(target: "sync", "Failed to get target block. Error: {:?}",e); - Phase::PendingTargetBlock { target_block: None } - }, - _ => return, - } - } else { + /// Set target block externally in case we skip warp proof downloading. + pub fn set_target_block(&mut self, header: B::Header) { + let Phase::PendingTargetBlock = self.phase else { + error!( + target: LOG_TARGET, + "Attempt to set warp sync target block in invalid phase.", + ); + debug_assert!(false); return }; - self.phase = new_phase; + self.phase = Phase::TargetBlock(header); } /// Validate and import a state response. 
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 2a20da5a556b..d350b0e54ae1 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -62,15 +62,14 @@ use sc_network::{ }; use sc_network_common::{ role::Roles, - sync::warp::{ - AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider, - }, + sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, service::{chain_sync::SyncingService, network::NetworkServiceProvider}, state_request_handler::StateRequestHandler, + warp::WarpSyncParams, warp_request_handler, }; use sc_service::client::Client; diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index fe18d1d002d5..917b3be8dc7c 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -47,12 +47,13 @@ use sc_network::{ NetworkService, NetworkStateInfo, NetworkStatusProvider, }; use sc_network_bitswap::BitswapRequestHandler; -use sc_network_common::{role::Roles, sync::warp::WarpSyncParams}; +use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, engine::SyncingEngine, service::network::NetworkServiceProvider, state_request_handler::StateRequestHandler, - warp_request_handler::RequestHandler as WarpSyncRequestHandler, SyncingService, + warp::WarpSyncParams, warp_request_handler::RequestHandler as WarpSyncRequestHandler, + SyncingService, }; use sc_rpc::{ author::AuthorApiServer, diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 0961967f9ca2..cd720e1c1e09 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -79,7 +79,7 @@ pub use sc_chain_spec::{ pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; -pub use sc_network_common::sync::warp::WarpSyncParams; +pub use sc_network_sync::warp::WarpSyncParams; #[doc(hidden)] pub use sc_network_transactions::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::{ From ec8949f624305b32432e70cdcc8fd8db7535490b Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 5 Sep 2023 12:11:30 +0200 Subject: [PATCH 13/28] Remove redundant calls to `borrow()` (#1393) Co-authored-by: Keith Yeung Co-authored-by: Francisco Aguirre --- substrate/client/api/src/lib.rs | 5 +---- substrate/frame/message-queue/src/lib.rs | 4 ++-- substrate/frame/support/src/storage/generator/map.rs | 5 ++--- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/substrate/client/api/src/lib.rs b/substrate/client/api/src/lib.rs index faadf3663a59..f614a1e30b48 100644 --- a/substrate/client/api/src/lib.rs +++ b/substrate/client/api/src/lib.rs @@ -49,7 +49,6 @@ pub trait UsageProvider { pub mod utils { use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::Block as BlockT; - use std::borrow::Borrow; /// Returns a function for checking block ancestry, the returned function will /// return `true` if the given hash (second parameter) is a descendent of the @@ -69,10 +68,8 @@ pub mod utils { return Ok(false) } - let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); - let mut hash = hash; - if let 
Some((current_hash, current_parent_hash)) = current { + if let Some((current_hash, current_parent_hash)) = ¤t { if base == current_hash { return Ok(false) } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 9ded84bb035c..7c38dec4b080 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -1092,7 +1092,7 @@ impl Pallet { origin.clone(), page_index, page.first_index, - payload.deref(), + payload, weight, overweight_limit, ) { @@ -1242,7 +1242,7 @@ impl Pallet { if let Some((_, processed, message)) = page.peek_index(i.try_into().expect("std-only code")) { - let msg = String::from_utf8_lossy(message.deref()); + let msg = String::from_utf8_lossy(message); if processed { page_info.push('*'); } diff --git a/substrate/frame/support/src/storage/generator/map.rs b/substrate/frame/support/src/storage/generator/map.rs index 90fac4b41c75..1d2511e324dc 100644 --- a/substrate/frame/support/src/storage/generator/map.rs +++ b/substrate/frame/support/src/storage/generator/map.rs @@ -21,7 +21,6 @@ use crate::{ Never, }; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; -use sp_std::borrow::Borrow; #[cfg(not(feature = "std"))] use sp_std::prelude::*; @@ -297,7 +296,7 @@ impl> storage::StorageMap let ret = f(&mut val); if ret.is_ok() { match G::from_query_to_optional_value(val) { - Some(ref val) => unhashed::put(final_key.as_ref(), &val.borrow()), + Some(ref val) => unhashed::put(final_key.as_ref(), &val), None => unhashed::kill(final_key.as_ref()), } } @@ -314,7 +313,7 @@ impl> storage::StorageMap let ret = f(&mut val); if ret.is_ok() { match val { - Some(ref val) => unhashed::put(final_key.as_ref(), &val.borrow()), + Some(ref val) => unhashed::put(final_key.as_ref(), &val), None => unhashed::kill(final_key.as_ref()), } } From 336916827f78a320fddbf1925984ec0a1f884376 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Tue, 5 Sep 2023 13:24:19 +0200 Subject: [PATCH 14/28] Remove dynamic dispatch using `Ext` (#1399) --- substrate/frame/contracts/src/exec.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 894667280b71..fdb30310ef70 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -1625,13 +1625,13 @@ mod tests { } struct MockCtx<'a> { - ext: &'a mut dyn Ext, + ext: &'a mut MockStack<'a>, input_data: Vec, } #[derive(Clone)] struct MockExecutable { - func: Rc ExecResult + 'static>, + func: Rc Fn(MockCtx<'a>, &Self) -> ExecResult + 'static>, func_type: ExportedFunction, code_hash: CodeHash, code_info: CodeInfo, @@ -1724,6 +1724,16 @@ mod tests { if let &Constructor = function { Self::increment_refcount(self.code_hash).unwrap(); } + // # Safety + // + // We know that we **always** call execute with a `MockStack` in this test. + // + // # Note + // + // The transmute is necessary because `execute` has to be generic over all + // `E: Ext`. However, `MockExecutable` can't be generic over `E` as it would + // constitute a cycle. 
+ let ext = unsafe { std::mem::transmute(ext) }; if function == &self.func_type { (self.func)(MockCtx { ext, input_data }, &self) } else { From 51dcc9fbc873de6616dd67e7b919c6de249b222b Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Tue, 5 Sep 2023 18:56:32 -0300 Subject: [PATCH 15/28] Enforce a decoding limit in MultiAssets (#1395) * Enforce a decoding limit in MultiAssets * ".git/.scripts/commands/fmt/fmt.sh" * Update polkadot/xcm/src/v3/multiasset.rs Co-authored-by: Keith Yeung * Just use a BoundedVec * Conflicts --------- Co-authored-by: command-bot <> Co-authored-by: Keith Yeung --- polkadot/xcm/src/v3/multiasset.rs | 38 +++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 1668d1b870dc..a5a74368289d 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -40,6 +40,7 @@ use core::{ }; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use bounded_collections::{BoundedVec, ConstU32}; /// A general identifier for an instance of a non-fungible asset class. #[derive( @@ -506,9 +507,8 @@ impl TryFrom for MultiAsset { #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct MultiAssets(Vec); -/// Maximum number of items we expect in a single `MultiAssets` value. Note this is not (yet) -/// enforced, and just serves to provide a sensible `max_encoded_len` for `MultiAssets`. -const MAX_ITEMS_IN_MULTIASSETS: usize = 20; +/// Maximum number of items in a single `MultiAssets` value that can be decoded. +pub const MAX_ITEMS_IN_MULTIASSETS: usize = 20; impl MaxEncodedLen for MultiAssets { fn max_encoded_len() -> usize { @@ -517,8 +517,9 @@ impl MaxEncodedLen for MultiAssets { } impl Decode for MultiAssets { - fn decode(input: &mut I) -> Result { - Self::from_sorted_and_deduplicated(Vec::::decode(input)?) 
+ fn decode(input: &mut I) -> Result { + let bounded_instructions = BoundedVec::>::decode(input)?; + Self::from_sorted_and_deduplicated(bounded_instructions.into_inner()) .map_err(|()| "Out of order".into()) } } @@ -974,4 +975,31 @@ mod tests { let r = MultiAssets::from_sorted_and_deduplicated(mixed_bad); assert!(r.is_err()); } + + #[test] + fn decoding_respects_limit() { + use super::*; + + // Having lots of one asset will work since they are deduplicated + let lots_of_one_asset: MultiAssets = + vec![(GeneralIndex(1), 1u128).into(); MAX_ITEMS_IN_MULTIASSETS + 1].into(); + let encoded = lots_of_one_asset.encode(); + assert!(MultiAssets::decode(&mut &encoded[..]).is_ok()); + + // Fewer assets than the limit works + let mut few_assets: MultiAssets = Vec::new().into(); + for i in 0..MAX_ITEMS_IN_MULTIASSETS { + few_assets.push((GeneralIndex(i as u128), 1u128).into()); + } + let encoded = few_assets.encode(); + assert!(MultiAssets::decode(&mut &encoded[..]).is_ok()); + + // Having lots of different assets will not work + let mut too_many_different_assets: MultiAssets = Vec::new().into(); + for i in 0..MAX_ITEMS_IN_MULTIASSETS + 1 { + too_many_different_assets.push((GeneralIndex(i as u128), 1u128).into()); + } + let encoded = too_many_different_assets.encode(); + assert!(MultiAssets::decode(&mut &encoded[..]).is_err()); + } } From 7986b12624f7fd6ecc55a151afca5df31608ed9f Mon Sep 17 00:00:00 2001 From: Chevdor Date: Wed, 6 Sep 2023 10:12:55 +0200 Subject: [PATCH 16/28] fmt fixes (#1413) --- polkadot/xcm/src/v3/multiasset.rs | 5 +++-- substrate/frame/broker/src/dispatchable_impls.rs | 8 ++------ substrate/frame/broker/src/tick_impls.rs | 8 ++------ 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index a5a74368289d..188555318c8c 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -34,13 +34,13 @@ use crate::v2::{ WildMultiAsset as OldWildMultiAsset, }; use alloc::{vec, vec::Vec}; +use bounded_collections::{BoundedVec, ConstU32}; use core::{ cmp::Ordering, convert::{TryFrom, TryInto}, }; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use bounded_collections::{BoundedVec, ConstU32}; /// A general identifier for an instance of a non-fungible asset class. 
#[derive( @@ -518,7 +518,8 @@ impl MaxEncodedLen for MultiAssets { impl Decode for MultiAssets { fn decode(input: &mut I) -> Result { - let bounded_instructions = BoundedVec::>::decode(input)?; + let bounded_instructions = + BoundedVec::>::decode(input)?; Self::from_sorted_and_deduplicated(bounded_instructions.into_inner()) .map_err(|()| "Out of order".into()) } diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 8dc0c9de393e..54cf5d71dcad 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -333,12 +333,8 @@ impl Pallet { region.begin = r + 1; contribution.length.saturating_dec(); - let Some(mut pool_record) = InstaPoolHistory::::get(r) else { - continue - }; - let Some(total_payout) = pool_record.maybe_payout else { - break - }; + let Some(mut pool_record) = InstaPoolHistory::::get(r) else { continue }; + let Some(total_payout) = pool_record.maybe_payout else { break }; let p = total_payout .saturating_mul(contributed_parts.into()) .checked_div(&pool_record.private_contributions.into()) diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 909af6caf734..a1a50a61908d 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -95,9 +95,7 @@ impl Pallet { } pub(crate) fn process_revenue() -> bool { - let Some((until, amount)) = T::Coretime::check_notify_revenue_info() else { - return false - }; + let Some((until, amount)) = T::Coretime::check_notify_revenue_info() else { return false }; let when: Timeslice = (until / T::TimeslicePeriod::get()).saturating_sub(One::one()).saturated_into(); let mut revenue = T::ConvertBalance::convert_back(amount); @@ -289,9 +287,7 @@ impl Pallet { rc_begin: RelayBlockNumberOf, core: CoreIndex, ) { - let Some(workplan) = Workplan::::take((timeslice, core)) else { - return - }; + let Some(workplan) = Workplan::::take((timeslice, core)) else { return }; let workload = Workload::::get(core); let parts_used = workplan.iter().map(|i| i.mask).fold(CoreMask::void(), |a, i| a | i); let mut workplan = workplan.into_inner(); From eaf380aaf5d366d9ad9068126785f3a44cf88c43 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Wed, 6 Sep 2023 10:25:47 +0200 Subject: [PATCH 17/28] Add PRdoc check (#1408) * Add test prdoc * Prepare for the check * Escape PR number * Fix conditional step * Add checkout and actual check * Cleanup * Minor fixes * Add doumentation * Add more doc --- .github/workflows/check-prdoc.yml | 51 ++++++++++++++++++++++++++ cumulus/scripts/ci/changelog/README.md | 2 +- docs/CONTRIBUTING.md | 32 ++++++++++++---- prdoc/.gitkeep | 0 prdoc/pr_1408_prodc-introduction.prdoc | 19 ++++++++++ 5 files changed, 96 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/check-prdoc.yml create mode 100644 prdoc/.gitkeep create mode 100644 prdoc/pr_1408_prodc-introduction.prdoc diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml new file mode 100644 index 000000000000..e677ded8d6cc --- /dev/null +++ b/.github/workflows/check-prdoc.yml @@ -0,0 +1,51 @@ +name: Check PRdoc + +on: + pull_request: + types: [labeled, opened, synchronize, unlabeled] + +env: + # todo: switch to paritytech/prdoc once the container is built & published + # see https://github.com/paritytech/scripts/pull/595 + IMAGE: chevdor/prdoc:v0.0.4 + API_BASE: https://api.github.com/repos + REPO: ${{ github.repository }} + GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} + GITHUB_PR: ${{ github.event.pull_request.number }} + MOUNT: /prdoc + ENGINE: docker + +jobs: + check-prdoc: + runs-on: ubuntu-latest + steps: + - name: Pull image + run: | + echo "Pulling $IMAGE" + docker pull $IMAGE + docker run --rm $IMAGE --version + + - name: Check if PRdoc is required + id: get-labels + run: | + # Fetch the labels for the PR under test + echo "Fetch the labels for $API_BASE/${REPO}/pulls/${GITHUB_PR}" + labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") + echo "Labels: ${labels}" + echo "labels=${labels}" >> "$GITHUB_OUTPUT" + + - name: No PRdoc required + if: ${{ contains(steps.get-labels.outputs.labels, 'R0') }} + run: | + echo "PR detected as silent, no PRdoc is required, exiting..." + exit 0 + + - name: Checkout repo + if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac #v4.0.0 + + - name: PRdoc check for PR#${{ github.event.pull_request.number }} + if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} + run: | + echo "Checking for PR#$GITHUB_PR in $MOUNT" + $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n 1408 diff --git a/cumulus/scripts/ci/changelog/README.md b/cumulus/scripts/ci/changelog/README.md index e274b4919473..5c8ee9c9b914 100644 --- a/cumulus/scripts/ci/changelog/README.md +++ b/cumulus/scripts/ci/changelog/README.md @@ -61,7 +61,7 @@ all the labels that are used, search for `meta` in the templates. Currently, the Note that labels with the same letter are mutually exclusive. A PR should not have both `B0` and `B5`, or both `C1` and `C9`. In case of conflicts, the template will decide which label will be considered. -## Dev and debuggin +## Dev and debugging ### Hot Reload diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 20fa1d3a768b..d134188e25df 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -80,18 +80,36 @@ Reviews should finish with approval unless there are issues that would result in The reviewers are also responsible to check: -a) if a changelog is necessary and attached - -b) the quality of information in the changelog file - -c) the PR has an impact on docs - -d) that the docs team was included in the review process of a docs update +1. if a changelog is necessary and attached +1. the quality of information in the changelog file +1. the PR has an impact on docs +1. that the docs team was included in the review process of a docs update **Reviews may not be used as an effective veto for a PR because**: 1. There exists a somewhat cleaner/better/faster way of accomplishing the same feature/fix. 2. It does not fit well with some other contributors' longer-term vision for the project. +## Documentation + +All Pull Requests must contain proper title & description. + +Some Pull Requests can be exempt of `prdoc` documentation, those +must be labelled with +[`R0-silent`](https://github.com/paritytech/labels/blob/main/ruled_labels/specs_polkadot-sdk.yaml#L89-L91). + +Non "silent" PRs must come with documentation in the form of a `.prdoc` file. +A `.prdoc` documentation is made of a text file (YAML) named `/prdoc/pr_NNNN.prdoc` where `NNNN` is the PR number. +For convenience, those file can also contain a short description/title: `/prdoc/pr_NNNN_pr-foobar.prdoc`. + +The CI automation checks for the presence and validity of a `prdoc` in the `/prdoc` folder. 
+Those files need to comply with a specific [schema](https://github.com/paritytech/prdoc/blob/master/schema_user.json). It +is highly recommended to [make your editor aware](https://github.com/paritytech/prdoc#schemas) of the schema as it is +self-described and will assist you in writing correct content. + +This schema is also embedded in the +[prdoc](https://github.com/paritytech/prdoc) utility that can also be used to generate and check the validity of a +`prdoc` locally. + ## Helping out We use [labels](https://github.com/paritytech/polkadot-sdk/labels) to manage PRs and issues and communicate diff --git a/prdoc/.gitkeep b/prdoc/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/prdoc/pr_1408_prodc-introduction.prdoc b/prdoc/pr_1408_prodc-introduction.prdoc new file mode 100644 index 000000000000..4b10e0fe2e81 --- /dev/null +++ b/prdoc/pr_1408_prodc-introduction.prdoc @@ -0,0 +1,19 @@ +# This PR does not need a prdoc but it is provided in order to test +title: PRdoc check + +doc: + - audience: Core Dev + description: | + This PRdoc is an **example**. + + This PR brings support and automated checks for documentation in the form of a + [`prdoc`](https://github.com/paritytech/prdoc/) file. + +migrations: + db: [] + + runtime: [] + +crates: [] + +host_functions: [] From cd71f7e9b1ddfc30b674cf3316e49bff247a4b5b Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Wed, 6 Sep 2023 21:09:07 +1200 Subject: [PATCH 18/28] RFC 14: Improve locking mechanism for parachains (#1290) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * rfc14 * Update polkadot/runtime/common/src/paras_registrar/mod.rs Co-authored-by: Bastian Köcher * Update polkadot/runtime/common/src/paras_registrar/mod.rs Co-authored-by: Bastian Köcher * Update polkadot/runtime/common/src/paras_registrar/mod.rs Co-authored-by: Bastian Köcher * fmt * fix * Update polkadot/runtime/common/src/paras_registrar/migration.rs Co-authored-by: Oliver Tale-Yazdi * fmt * 2224 is unlocked * update migration list * update comment * use VersionedMigration --------- Co-authored-by: Bastian Köcher Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 1 + .../runtime/common/src/assigned_slots/mod.rs | 1 + polkadot/runtime/common/src/crowdloan/mod.rs | 2 - .../runtime/common/src/integration_tests.rs | 1 + .../common/src/paras_registrar/migration.rs | 71 +++++++++++++++++++ .../mod.rs} | 61 ++++++++++++---- polkadot/runtime/kusama/Cargo.toml | 2 +- polkadot/runtime/kusama/src/lib.rs | 16 +++++ polkadot/runtime/parachains/Cargo.toml | 1 + polkadot/runtime/parachains/src/mock.rs | 1 + polkadot/runtime/parachains/src/paras/mod.rs | 27 ++++++- polkadot/runtime/polkadot/Cargo.toml | 2 +- polkadot/runtime/polkadot/src/lib.rs | 20 +++++- polkadot/runtime/rococo/Cargo.toml | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 + polkadot/runtime/test-runtime/src/lib.rs | 1 + polkadot/runtime/westend/Cargo.toml | 2 +- polkadot/runtime/westend/src/lib.rs | 2 + 18 files changed, 191 insertions(+), 24 deletions(-) create mode 100644 polkadot/runtime/common/src/paras_registrar/migration.rs rename polkadot/runtime/common/src/{paras_registrar.rs => paras_registrar/mod.rs} (97%) diff --git a/Cargo.lock b/Cargo.lock index c254302517f0..918426ce5c0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12780,6 +12780,7 @@ dependencies = [ "frame-system", "futures", "hex-literal 0.4.1", + "impl-trait-for-tuples", "log", "pallet-authority-discovery", "pallet-authorship", diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs 
b/polkadot/runtime/common/src/assigned_slots/mod.rs index 3683cfc210fa..cc8ec339c118 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -739,6 +739,7 @@ mod tests { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = (); type NextSessionRotation = crate::mock::TestNextSessionRotation; + type OnNewHead = (); } impl parachains_shared::Config for Test {} diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 0303808e0747..5a2939145925 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -441,8 +441,6 @@ pub mod pallet { ); NextFundIndex::::put(new_fund_index); - // Add a lock to the para so that the configuration cannot be changed. - T::Registrar::apply_lock(index); Self::deposit_event(Event::::Created { para_id: index }); Ok(()) diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index f78347dedd8c..f14db68267d5 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -204,6 +204,7 @@ impl paras::Config for Test { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = (); type NextSessionRotation = crate::mock::TestNextSessionRotation; + type OnNewHead = (); } parameter_types! { diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs new file mode 100644 index 000000000000..7d3dda54e0ed --- /dev/null +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use super::*; +use frame_support::traits::{Contains, OnRuntimeUpgrade}; + +#[derive(Encode, Decode)] +pub struct ParaInfoV1 { + manager: Account, + deposit: Balance, + locked: bool, +} + +pub struct VersionUncheckedMigrateToV1( + sp_std::marker::PhantomData<(T, UnlockParaIds)>, +); +impl> OnRuntimeUpgrade + for VersionUncheckedMigrateToV1 +{ + fn on_runtime_upgrade() -> Weight { + let mut count = 0u64; + Paras::::translate::>, _>(|key, v1| { + count.saturating_inc(); + Some(ParaInfo { + manager: v1.manager, + deposit: v1.deposit, + locked: if UnlockParaIds::contains(&key) { None } else { Some(v1.locked) }, + }) + }); + + log::info!(target: "runtime::registrar", "Upgraded {} storages to version 1", count); + T::DbWeight::get().reads_writes(count, count) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + Ok((Paras::::iter_keys().count() as u32).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let old_count = u32::decode(&mut &state[..]).expect("Known good"); + let new_count = Paras::::iter_values().count() as u32; + + ensure!(old_count == new_count, "Paras count should not change"); + Ok(()) + } +} + +#[cfg(feature = "experimental")] +pub type VersionCheckedMigrateToV1 = + frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + super::Pallet, + ::DbWeight, + >; diff --git a/polkadot/runtime/common/src/paras_registrar.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs similarity index 97% rename from polkadot/runtime/common/src/paras_registrar.rs rename to polkadot/runtime/common/src/paras_registrar/mod.rs index 3f5a8e1a5f93..f2751803a413 100644 --- a/polkadot/runtime/common/src/paras_registrar.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -17,6 +17,8 @@ //! Pallet to handle parachain registration and related fund management. //! In essence this is a simple wrapper around `paras`. +pub mod migration; + use frame_support::{ dispatch::DispatchResult, ensure, @@ -35,7 +37,7 @@ use sp_std::{prelude::*, result}; use crate::traits::{OnSwap, Registrar}; pub use pallet::*; use parity_scale_codec::{Decode, Encode}; -use runtime_parachains::paras::ParaKind; +use runtime_parachains::paras::{OnNewHead, ParaKind}; use scale_info::TypeInfo; use sp_runtime::{ traits::{CheckedSub, Saturating}, @@ -49,7 +51,15 @@ pub struct ParaInfo { /// The amount reserved by the `manager` account for the registration. deposit: Balance, /// Whether the para registration should be locked from being controlled by the manager. - locked: bool, + /// None means the lock had not been explicitly set, and should be treated as false. + locked: Option, +} + +impl ParaInfo { + /// Returns if the para is locked. + pub fn is_locked(&self) -> bool { + self.locked.unwrap_or(false) + } } type BalanceOf = @@ -96,8 +106,12 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -446,12 +460,12 @@ impl Registrar for Pallet { // Apply a lock to the parachain. fn apply_lock(id: ParaId) { - Paras::::mutate(id, |x| x.as_mut().map(|info| info.locked = true)); + Paras::::mutate(id, |x| x.as_mut().map(|info| info.locked = Some(true))); } // Remove a lock from the parachain. 
fn remove_lock(id: ParaId) { - Paras::::mutate(id, |x| x.as_mut().map(|info| info.locked = false)); + Paras::::mutate(id, |x| x.as_mut().map(|info| info.locked = Some(false))); } // Register a Para ID under control of `manager`. @@ -481,9 +495,7 @@ impl Registrar for Pallet { ); runtime_parachains::schedule_parathread_upgrade::(id) .map_err(|_| Error::::CannotUpgrade)?; - // Once a para has upgraded to a parachain, it can no longer be managed by the owner. - // Intentionally, the flag stays with the para even after downgrade. - Self::apply_lock(id); + Ok(()) } @@ -533,7 +545,7 @@ impl Pallet { .map_err(|e| e.into()) .and_then(|who| -> DispatchResult { let para_info = Paras::::get(id).ok_or(Error::::NotRegistered)?; - ensure!(!para_info.locked, Error::::ParaLocked); + ensure!(!para_info.is_locked(), Error::::ParaLocked); ensure!(para_info.manager == who, Error::::NotOwner); Ok(()) }) @@ -566,7 +578,7 @@ impl Pallet { let deposit = deposit_override.unwrap_or_else(T::ParaDeposit::get); ::Currency::reserve(&who, deposit)?; - let info = ParaInfo { manager: who.clone(), deposit, locked: false }; + let info = ParaInfo { manager: who.clone(), deposit, locked: None }; Paras::::insert(id, info); Self::deposit_event(Event::::Reserved { para_id: id, who }); @@ -585,7 +597,7 @@ impl Pallet { ) -> DispatchResult { let deposited = if let Some(para_data) = Paras::::get(id) { ensure!(para_data.manager == who, Error::::NotOwner); - ensure!(!para_data.locked, Error::::ParaLocked); + ensure!(!para_data.is_locked(), Error::::ParaLocked); para_data.deposit } else { ensure!(!ensure_reserved, Error::::NotReserved); @@ -601,7 +613,7 @@ impl Pallet { } else if let Some(rebate) = deposited.checked_sub(&deposit) { ::Currency::unreserve(&who, rebate); }; - let info = ParaInfo { manager: who.clone(), deposit, locked: false }; + let info = ParaInfo { manager: who.clone(), deposit, locked: None }; Paras::::insert(id, info); // We check above that para has no lifecycle, so this should not fail. @@ -665,6 +677,21 @@ impl Pallet { } } +impl OnNewHead for Pallet { + fn on_new_head(id: ParaId, _head: &HeadData) -> Weight { + // mark the parachain locked if the locked value is not already set + let mut writes = 0; + if let Some(mut info) = Paras::::get(id) { + if info.locked.is_none() { + info.locked = Some(true); + Paras::::insert(id, info); + writes += 1; + } + } + T::DbWeight::get().reads_writes(1, writes) + } +} + #[cfg(test)] mod tests { use super::*; @@ -784,6 +811,7 @@ mod tests { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = (); type NextSessionRotation = crate::mock::TestNextSessionRotation; + type OnNewHead = (); } impl configuration::Config for Test { @@ -1270,8 +1298,10 @@ mod tests { )); assert_noop!(Registrar::add_lock(RuntimeOrigin::signed(2), para_id), BadOrigin); - // Once they begin onboarding, we lock them in. - assert_ok!(Registrar::add_lock(RuntimeOrigin::signed(1), para_id)); + + // Once they produces new block, we lock them in. 
+ Registrar::on_new_head(para_id, &Default::default()); + // Owner cannot pass origin check when checking lock assert_noop!( Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id), @@ -1283,6 +1313,11 @@ mod tests { assert_ok!(Registrar::remove_lock(para_origin(para_id), para_id)); // Owner can pass origin check again assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); + + // Won't lock again after it is unlocked + Registrar::on_new_head(para_id, &Default::default()); + + assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); }); } diff --git a/polkadot/runtime/kusama/Cargo.toml b/polkadot/runtime/kusama/Cargo.toml index 8b0f59516c6d..6b0370633f0b 100644 --- a/polkadot/runtime/kusama/Cargo.toml +++ b/polkadot/runtime/kusama/Cargo.toml @@ -103,7 +103,7 @@ frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarki pallet-election-provider-support-benchmarking = { path = "../../../substrate/frame/election-provider-support/benchmarking", default-features = false, optional = true } hex-literal = "0.4.1" -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features = ["experimental"] } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 94af807fb5de..659a7052d2b7 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -1215,6 +1215,7 @@ impl parachains_paras::Config for Runtime { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; + type OnNewHead = Registrar; } parameter_types! { @@ -1710,6 +1711,19 @@ pub mod migrations { } } + pub struct ParachainsToUnlock; + impl Contains for ParachainsToUnlock { + fn contains(id: &ParaId) -> bool { + let id: u32 = (*id).into(); + // ksuama parachains/parathreads that are locked and never produced block + match id { + 2003 | 2008 | 2018 | 2077 | 2089 | 2111 | 2112 | 2120 | 2126 | 2127 | 2130 | + 2226 | 2227 | 2231 | 2233 | 2237 | 2256 | 2257 | 2261 | 2268 | 2275 => true, + _ => false, + } + } + } + /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( init_state_migration::InitMigrate, @@ -1741,6 +1755,8 @@ pub mod migrations { UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, + // Migrate parachain info format + paras_registrar::migration::VersionCheckedMigrateToV1, ); } diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 9a8bd5017e07..0e2f6aa1aa24 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] +impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index f978b6c3360e..ded7de08e4fa 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -214,6 +214,7 @@ impl crate::paras::Config for Test { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = ParaInclusion; type NextSessionRotation = TestNextSessionRotation; + type OnNewHead = (); } impl crate::dmp::Config for Test {} diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 95b89a1ca2c3..2f370b5bfe47 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -481,6 +481,22 @@ impl PvfCheckActiveVoteState { } } +/// Runtime hook for when a parachain head is updated. +pub trait OnNewHead { + /// Called when a parachain head is updated. + /// Returns the weight consumed by this function. + fn on_new_head(id: ParaId, head: &HeadData) -> Weight; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl OnNewHead for Tuple { + fn on_new_head(id: ParaId, head: &HeadData) -> Weight { + let mut weight: Weight = Default::default(); + for_tuples!( #( weight.saturating_accrue(Tuple::on_new_head(id, head)); )* ); + weight + } +} + pub trait WeightInfo { fn force_set_current_code(c: u32) -> Weight; fn force_set_current_head(s: u32) -> Weight; @@ -575,6 +591,9 @@ pub mod pallet { /// be set to the `ParaInclusion` pallet. type QueueFootprinter: QueueFootprinter; + /// Runtime hook for when a parachain head is updated. + type OnNewHead: OnNewHead; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -1962,10 +1981,10 @@ impl Pallet { new_head: HeadData, execution_context: BlockNumberFor, ) -> Weight { - Heads::::insert(&id, new_head); + Heads::::insert(&id, &new_head); MostRecentContext::::insert(&id, execution_context); - if let Some(expected_at) = FutureCodeUpgrades::::get(&id) { + let weight = if let Some(expected_at) = FutureCodeUpgrades::::get(&id) { if expected_at <= execution_context { FutureCodeUpgrades::::remove(&id); UpgradeGoAheadSignal::::remove(&id); @@ -2005,7 +2024,9 @@ impl Pallet { // the `Abort` signal. 
UpgradeGoAheadSignal::::remove(&id); T::DbWeight::get().reads_writes(1, 2) - } + }; + + weight.saturating_add(T::OnNewHead::on_new_head(id, &new_head)) } /// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in diff --git a/polkadot/runtime/polkadot/Cargo.toml b/polkadot/runtime/polkadot/Cargo.toml index d185677ab8d2..0b9498347ca4 100644 --- a/polkadot/runtime/polkadot/Cargo.toml +++ b/polkadot/runtime/polkadot/Cargo.toml @@ -94,7 +94,7 @@ pallet-session-benchmarking = { path = "../../../substrate/frame/session/benchma pallet-nomination-pools-benchmarking = { path = "../../../substrate/frame/nomination-pools/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1", optional = true } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features = ["experimental"] } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index b71e0f726c55..45ea561b33fa 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -47,8 +47,8 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, parameter_types, traits::{ - ConstU32, EitherOf, EitherOfDiverse, InstanceFilter, KeyOwnerProofSystem, PrivilegeCmp, - ProcessMessage, ProcessMessageError, WithdrawReasons, + ConstU32, Contains, EitherOf, EitherOfDiverse, InstanceFilter, KeyOwnerProofSystem, + PrivilegeCmp, ProcessMessage, ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -1091,6 +1091,7 @@ impl parachains_paras::Config for Runtime { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; + type OnNewHead = Registrar; } parameter_types! { @@ -1503,6 +1504,19 @@ pub mod migrations { type PalletName = TipsPalletName; } + pub struct ParachainsToUnlock; + impl Contains for ParachainsToUnlock { + fn contains(id: &ParaId) -> bool { + let id: u32 = (*id).into(); + // polkadot parachains/parathreads that are locked and never produced block + match id { + 2003 | 2015 | 2017 | 2018 | 2025 | 2028 | 2036 | 2038 | 2053 | 2055 | 2090 | + 2097 | 2106 | 3336 | 3338 | 3342 => true, + _ => false, + } + } + } + /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, @@ -1525,6 +1539,8 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, parachains_configuration::migration::v9::MigrateToV9, + // Migrate parachain info format + paras_registrar::migration::VersionCheckedMigrateToV1, ); } diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 6af9407a5879..a181250cfa37 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -84,7 +84,7 @@ frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-fea frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1" } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features=["experimental"] } +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features = ["experimental"] } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index a80f45c340d9..e043852901f1 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1031,6 +1031,7 @@ impl parachains_paras::Config for Runtime { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; + type OnNewHead = Registrar; } parameter_types! 
{ @@ -1550,6 +1551,7 @@ pub mod migrations { parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, + paras_registrar::migration::VersionCheckedMigrateToV1, ); } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b2397299430d..94852ad39f5a 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -540,6 +540,7 @@ impl parachains_paras::Config for Runtime { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; + type OnNewHead = (); } impl parachains_dmp::Config for Runtime {} diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 5a47288297be..de561c8ac68b 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -95,7 +95,7 @@ pallet-offences-benchmarking = { path = "../../../substrate/frame/offences/bench pallet-session-benchmarking = { path = "../../../substrate/frame/session/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1", optional = true } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features=["experimental"] } +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features = ["experimental"] } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 73aa4980151e..7dfc781d2467 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1044,6 +1044,7 @@ impl parachains_paras::Config for Runtime { type UnsignedPriority = ParasUnsignedPriority; type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; + type OnNewHead = (); } parameter_types! { @@ -1425,6 +1426,7 @@ pub mod migrations { parachains_configuration::migration::v8::MigrateToV8, UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, + paras_registrar::migration::VersionCheckedMigrateToV1, ); } From cd901764a52edc04a6d22bea3a526def593ab2a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 12:30:54 +0300 Subject: [PATCH 19/28] Bump enumn from 0.1.11 to 0.1.12 (#1412) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [enumn](https://github.com/dtolnay/enumn) from 0.1.11 to 0.1.12. - [Release notes](https://github.com/dtolnay/enumn/releases) - [Commits](https://github.com/dtolnay/enumn/compare/0.1.11...0.1.12) --- updated-dependencies: - dependency-name: enumn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Bastian Köcher --- Cargo.lock | 4 ++-- polkadot/runtime/common/slot_range_helper/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 918426ce5c0f..0d08c175bde5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4733,9 +4733,9 @@ dependencies = [ [[package]] name = "enumn" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" +checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 30d5dc84e9dc..f65717519d5e 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true [dependencies] paste = "1.0" -enumn = "0.1.8" +enumn = "0.1.12" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } From dc28df0b278f2ca0f69d14363e08668a9ecc2fac Mon Sep 17 00:00:00 2001 From: Cem Eliguzel Date: Wed, 6 Sep 2023 13:02:33 +0300 Subject: [PATCH 20/28] Fix the wasm runtime substitute caching bug (#1416) --- substrate/client/service/src/client/wasm_substitutes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/service/src/client/wasm_substitutes.rs b/substrate/client/service/src/client/wasm_substitutes.rs index a792ab87e771..70db0ef20f5a 100644 --- a/substrate/client/service/src/client/wasm_substitutes.rs +++ b/substrate/client/service/src/client/wasm_substitutes.rs @@ -126,7 +126,7 @@ where let runtime_code = RuntimeCode { code_fetcher: &WrappedRuntimeCode((&code).into()), heap_pages: None, - hash: Vec::new(), + hash: make_hash(&code), }; let version = Self::runtime_version(&executor, &runtime_code)?; let spec_version = version.spec_version; From 50de035f6d33b7233fd263892dd263d52b997f5c Mon Sep 17 00:00:00 2001 From: Chevdor Date: Wed, 6 Sep 2023 13:58:53 +0200 Subject: [PATCH 21/28] Fix PRdoc check (#1419) --- .github/workflows/check-prdoc.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index e677ded8d6cc..219952fdbfba 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -47,5 +47,5 @@ jobs: - name: PRdoc check for PR#${{ github.event.pull_request.number }} if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} run: | - echo "Checking for PR#$GITHUB_PR in $MOUNT" - $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n 1408 + echo "Checking for PR#${GITHUB_PR} in $MOUNT" + $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n ${GITHUB_PR} From 68ab943c65a7d214d62c3b7e0fd94561dd9e64b2 Mon Sep 17 00:00:00 2001 From: Juan Date: Wed, 6 Sep 2023 15:06:47 +0200 Subject: [PATCH 22/28] Remove deprecated `pallet_balances`'s `set_balance_deprecated` and `transfer` dispatchables (#1226) * remove deprecated dispatchables * update test * update tests * update tests * add prdocs * add prdoc * Update docs/prdoc/pr_1226.prdoc 
Co-authored-by: Chevdor * move prdoc file --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Keith Yeung Co-authored-by: Chevdor --- prdoc/pr_1226.prdoc | 17 +++++ substrate/frame/asset-conversion/src/tests.rs | 6 +- substrate/frame/balances/src/lib.rs | 63 ------------------- substrate/frame/safe-mode/src/mock.rs | 5 +- substrate/frame/safe-mode/src/tests.rs | 2 +- substrate/frame/tx-pause/src/mock.rs | 5 +- substrate/frame/tx-pause/src/tests.rs | 18 +++--- 7 files changed, 40 insertions(+), 76 deletions(-) create mode 100644 prdoc/pr_1226.prdoc diff --git a/prdoc/pr_1226.prdoc b/prdoc/pr_1226.prdoc new file mode 100644 index 000000000000..df7a425b5384 --- /dev/null +++ b/prdoc/pr_1226.prdoc @@ -0,0 +1,17 @@ +title: Removed deprecated `Balances::transfer` and `Balances::set_balance_deprecated` functions. + +doc: + - audience: Builder + description: The Balances pallet's dispatchables `set_balance_deprecated` and `transfer` were deprecated in [paritytech/substrate#12951](https://github.com/paritytech/substrate/pull/12951) and have now been removed. + notes: + - Use `set_balance_deprecated` instead `force_set_balance` and `transfer_allow_death` instead of `transfer`. + +migrations: + db: [] + + runtime: [] + +crates: + - name: pallet-balances + +host_functions: [] diff --git a/substrate/frame/asset-conversion/src/tests.rs b/substrate/frame/asset-conversion/src/tests.rs index 190e4fb62147..3af7500a6f30 100644 --- a/substrate/frame/asset-conversion/src/tests.rs +++ b/substrate/frame/asset-conversion/src/tests.rs @@ -1389,7 +1389,11 @@ fn cannot_block_pool_creation() { let pool_account = AssetConversion::get_pool_account(&AssetConversion::get_pool_id(token_2, token_1)); // And transfers the ED to that pool account - assert_ok!(Balances::transfer(RuntimeOrigin::signed(attacker), pool_account, ed)); + assert_ok!(Balances::transfer_allow_death( + RuntimeOrigin::signed(attacker), + pool_account, + ed + )); // Then, the attacker creates 14 tokens and sends one of each to the pool account for i in 10..25 { create_tokens(attacker, vec![NativeOrAssetId::Asset(i)]); diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index f94b3230b917..5da6600d8796 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -563,53 +563,6 @@ pub mod pallet { Ok(()) } - /// Set the regular balance of a given account; it also takes a reserved balance but this - /// must be the same as the account's current reserved balance. - /// - /// The dispatch origin for this call is `root`. - /// - /// WARNING: This call is DEPRECATED! Use `force_set_balance` instead. - #[pallet::call_index(1)] - #[pallet::weight( - T::WeightInfo::force_set_balance_creating() // Creates a new account. - .max(T::WeightInfo::force_set_balance_killing()) // Kills an existing account. - )] - pub fn set_balance_deprecated( - origin: OriginFor, - who: AccountIdLookupOf, - #[pallet::compact] new_free: T::Balance, - #[pallet::compact] old_reserved: T::Balance, - ) -> DispatchResult { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let existential_deposit = Self::ed(); - - let wipeout = new_free < existential_deposit; - let new_free = if wipeout { Zero::zero() } else { new_free }; - - // First we try to modify the account's balance to the forced balance. 
- let old_free = Self::try_mutate_account_handling_dust( - &who, - |account, _is_new| -> Result { - let old_free = account.free; - ensure!(account.reserved == old_reserved, TokenError::Unsupported); - account.free = new_free; - Ok(old_free) - }, - )?; - - // This will adjust the total issuance, which was not done by the `mutate_account` - // above. - if new_free > old_free { - mem::drop(PositiveImbalance::::new(new_free - old_free)); - } else if new_free < old_free { - mem::drop(NegativeImbalance::::new(old_free - new_free)); - } - - Self::deposit_event(Event::BalanceSet { who, free: new_free }); - Ok(()) - } - /// Exactly as `transfer_allow_death`, except the origin must be root and the source account /// may be specified. #[pallet::call_index(2)] @@ -730,22 +683,6 @@ pub mod pallet { } } - /// Alias for `transfer_allow_death`, provided only for name-wise compatibility. - /// - /// WARNING: DEPRECATED! Will be released in approximately 3 months. - #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::transfer_allow_death())] - pub fn transfer( - origin: OriginFor, - dest: AccountIdLookupOf, - #[pallet::compact] value: T::Balance, - ) -> DispatchResult { - let source = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&source, &dest, value, Expendable)?; - Ok(()) - } - /// Set the regular balance of a given account. /// /// The dispatch origin for this call is `root`. diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 337b6076f84b..635ee0cfedc0 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -122,7 +122,10 @@ impl InstanceFilter for ProxyType { match self { ProxyType::Any => true, ProxyType::JustTransfer => { - matches!(c, RuntimeCall::Balances(pallet_balances::Call::transfer { .. })) + matches!( + c, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) + ) }, ProxyType::JustUtility => matches!(c, RuntimeCall::Utility { .. }), } diff --git a/substrate/frame/safe-mode/src/tests.rs b/substrate/frame/safe-mode/src/tests.rs index 1e2eb343aa2f..ca1d7eb1d934 100644 --- a/substrate/frame/safe-mode/src/tests.rs +++ b/substrate/frame/safe-mode/src/tests.rs @@ -605,7 +605,7 @@ fn fails_when_explicit_origin_required() { } fn call_transfer() -> RuntimeCall { - RuntimeCall::Balances(pallet_balances::Call::transfer { dest: 1, value: 1 }) + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: 1, value: 1 }) } fn signed(who: u64) -> RuntimeOrigin { diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 706b0a558ba7..60c5fc1eced5 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -120,7 +120,10 @@ impl InstanceFilter for ProxyType { match self { ProxyType::Any => true, ProxyType::JustTransfer => { - matches!(c, RuntimeCall::Balances(pallet_balances::Call::transfer { .. })) + matches!( + c, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) + ) }, ProxyType::JustUtility => matches!(c, RuntimeCall::Utility { .. 
}), } diff --git a/substrate/frame/tx-pause/src/tests.rs b/substrate/frame/tx-pause/src/tests.rs index 48b70f71ccb0..a71ff3439d90 100644 --- a/substrate/frame/tx-pause/src/tests.rs +++ b/substrate/frame/tx-pause/src/tests.rs @@ -32,7 +32,7 @@ fn can_pause_specific_call() { assert_ok!(TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer") + full_name::(b"Balances", b"transfer_allow_death") )); assert_err!( @@ -69,7 +69,7 @@ fn can_unpause_specific_call() { new_test_ext().execute_with(|| { assert_ok!(TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), )); assert_err!( call_transfer(2, 1).dispatch(RuntimeOrigin::signed(2)), @@ -78,7 +78,7 @@ fn can_unpause_specific_call() { assert_ok!(TxPause::unpause( RuntimeOrigin::signed(mock::UnpauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), )); assert_ok!(call_transfer(4, 1).dispatch(RuntimeOrigin::signed(0))); }); @@ -92,7 +92,7 @@ fn can_filter_balance_in_batch_when_paused() { assert_ok!(TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), )); assert_ok!(batch_call.clone().dispatch(RuntimeOrigin::signed(0))); @@ -111,7 +111,7 @@ fn can_filter_balance_in_proxy_when_paused() { new_test_ext().execute_with(|| { assert_ok!(TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), )); assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); @@ -152,7 +152,7 @@ fn fails_to_pause_unpausable_call_when_other_call_is_paused() { assert_ok!(TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), )); assert_ok!(call_transfer_keep_alive(3, 1).dispatch(RuntimeOrigin::signed(3))); @@ -181,13 +181,13 @@ fn fails_to_pause_already_paused_pallet() { new_test_ext().execute_with(|| { assert_ok!(TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), )); assert_noop!( TxPause::pause( RuntimeOrigin::signed(mock::PauseOrigin::get()), - full_name::(b"Balances", b"transfer"), + full_name::(b"Balances", b"transfer_allow_death"), ), Error::::IsPaused ); @@ -208,7 +208,7 @@ fn fails_to_unpause_not_paused_pallet() { } pub fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(pallet_balances::Call::transfer { dest, value }) + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest, value }) } pub fn call_transfer_keep_alive(dest: u64, value: u64) -> RuntimeCall { From 4c077b209b17fbc0d8fa1468feb2015f7505f060 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 6 Sep 2023 15:00:53 +0100 Subject: [PATCH 23/28] pallet asset-conversion additional quote tests (#1371) * added identity quote test (only possible if fees are not included in quote) * add tests that compare quoted price to actual execution --- substrate/frame/asset-conversion/src/tests.rs | 261 ++++++++++++++++++ 1 file changed, 261 insertions(+) diff --git a/substrate/frame/asset-conversion/src/tests.rs b/substrate/frame/asset-conversion/src/tests.rs index 3af7500a6f30..1c1267ab87b3 100644 --- 
a/substrate/frame/asset-conversion/src/tests.rs +++ b/substrate/frame/asset-conversion/src/tests.rs @@ -569,6 +569,16 @@ fn can_quote_price() { ), Some(60) ); + // including fee so should get less out... + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 3000, + true, + ), + Some(46) + ); // Check it still gives same price: // (if the above accidentally exchanged then it would not give same quote as before) assert_eq!( @@ -580,6 +590,16 @@ fn can_quote_price() { ), Some(60) ); + // including fee so should get less out... + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 3000, + true, + ), + Some(46) + ); // Check inverse: assert_eq!( @@ -591,6 +611,247 @@ fn can_quote_price() { ), Some(3000) ); + // including fee so should get less out... + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + 60, + true, + ), + Some(2302) + ); + + // + // same tests as above but for quote_price_tokens_for_exact_tokens: + // + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 60, + false, + ), + Some(3000) + ); + // including fee so should need to put more in... + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 60, + true, + ), + Some(4299) + ); + // Check it still gives same price: + // (if the above accidentally exchanged then it would not give same quote as before) + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 60, + false, + ), + Some(3000) + ); + // including fee so should need to put more in... + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + 60, + true, + ), + Some(4299) + ); + + // Check inverse: + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + 3000, + false, + ), + Some(60) + ); + // including fee so should need to put more in... 
+ assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + 3000, + true, + ), + Some(86) + ); + + // + // roundtrip: Without fees one should get the original number + // + let amount_in = 100; + + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + amount_in, + false, + ) + .and_then(|amount| AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + amount, + false, + )), + Some(amount_in) + ); + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + amount_in, + false, + ) + .and_then(|amount| AssetConversion::quote_price_exact_tokens_for_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + amount, + false, + )), + Some(amount_in) + ); + + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + amount_in, + false, + ) + .and_then(|amount| AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + amount, + false, + )), + Some(amount_in) + ); + assert_eq!( + AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Native, + NativeOrAssetId::Asset(2), + amount_in, + false, + ) + .and_then(|amount| AssetConversion::quote_price_tokens_for_exact_tokens( + NativeOrAssetId::Asset(2), + NativeOrAssetId::Native, + amount, + false, + )), + Some(amount_in) + ); + }); +} + +#[test] +fn quote_price_exact_tokens_for_tokens_matches_execution() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 100000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 200, + 1, + 1, + user, + )); + + let amount = 1; + let quoted_price = 49; + assert_eq!( + AssetConversion::quote_price_exact_tokens_for_tokens(token_2, token_1, amount, true,), + Some(quoted_price) + ); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, amount)); + let prior_dot_balance = 20000; + assert_eq!(prior_dot_balance, balance(user2, token_1)); + assert_ok!(AssetConversion::swap_exact_tokens_for_tokens( + RuntimeOrigin::signed(user2), + bvec![token_2, token_1], + amount, + 1, + user2, + false, + )); + + assert_eq!(prior_dot_balance + quoted_price, balance(user2, token_1)); + }); +} + +#[test] +fn quote_price_tokens_for_exact_tokens_matches_execution() { + new_test_ext().execute_with(|| { + let user = 1; + let user2 = 2; + let token_1 = NativeOrAssetId::Native; + let token_2 = NativeOrAssetId::Asset(2); + + create_tokens(user, vec![token_2]); + assert_ok!(AssetConversion::create_pool(RuntimeOrigin::signed(user), token_1, token_2)); + + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 100000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + token_1, + token_2, + 10000, + 200, + 1, + 1, + user, + )); + + let amount = 49; + let quoted_price = 1; + assert_eq!( + 
AssetConversion::quote_price_tokens_for_exact_tokens(token_2, token_1, amount, true,), + Some(quoted_price) + ); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user2, amount)); + let prior_dot_balance = 20000; + assert_eq!(prior_dot_balance, balance(user2, token_1)); + let prior_asset_balance = 49; + assert_eq!(prior_asset_balance, balance(user2, token_2)); + assert_ok!(AssetConversion::swap_tokens_for_exact_tokens( + RuntimeOrigin::signed(user2), + bvec![token_2, token_1], + amount, + 1, + user2, + false, + )); + + assert_eq!(prior_dot_balance + amount, balance(user2, token_1)); + assert_eq!(prior_asset_balance - quoted_price, balance(user2, token_2)); }); } From eeb368ed9ca3bc2af0ccf9dfddb985ddef8f4233 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 6 Sep 2023 16:11:10 +0200 Subject: [PATCH 24/28] GHW for building and publishing docker images (#1391) * add ghw and scripts for docker image deployment * debug * add permissions for content * fix path to the bin folder * add tags * rename env * fix path to docker file * make polkadot-parachain executable * fix typo * fix more typos * test * revert back use of working directory * mke bin executable in the artifacts folder * use cd instead of working directory * change path to cash * fix path to cash * change cache key * delete old flows * addressed PR comments * fix path * reorg docker files --- {.gitlab => .github/scripts}/common/lib.sh | 73 ++++++++- .../workflows/release-50_publish-docker.yml | 148 +++++++++--------- .gitlab/pipeline/build.yml | 2 +- .gitlab/pipeline/publish.yml | 18 +-- .gitlab/test_deterministic_wasm.sh | 2 +- cumulus/docker/parachain-registrar.dockerfile | 27 ---- ...rachain-debug_unsigned_injected.Dockerfile | 49 ------ .../polkadot-parachain_builder.Containerfile | 36 ----- .../docker/test-parachain-collator.dockerfile | 46 ------ .../docker/test-parachain_injected.Dockerfile | 49 ------ docker/docker-compose.yml | 129 --------------- docker/dockerfiles/binary_injected.Dockerfile | 48 ++++++ .../collator_injected.Dockerfile | 2 +- .../dockerfiles}/docker-compose.yml | 8 +- .../malus_injected.Dockerfile | 0 .../parachain-registrar.dockerfile | 2 +- ...rachain-debug_unsigned_injected.Dockerfile | 2 +- .../polkadot-parachain_builder.Containerfile | 4 +- .../polkadot-parachain_injected.Dockerfile | 10 +- docker/dockerfiles/polkadot/README.md | 9 ++ .../polkadot/docker-compose-local.yml | 50 ++++++ .../dockerfiles/polkadot/docker-compose.yml | 22 +++ .../polkadot/polkadot_Dockerfile.README.md | 7 + .../polkadot/polkadot_builder.Dockerfile | 36 +++++ .../polkadot_injected_debian.Dockerfile | 53 +++++++ .../polkadot_injected_debug.Dockerfile | 2 +- .../polkadot_injected_release.Dockerfile | 2 +- .../staking-miner_builder.Dockerfile | 0 .../staking-miner_injected.Dockerfile | 0 .../substrate_injected.Dockerfile | 0 .../test-parachain-collator.dockerfile | 2 +- .../test-parachain_injected.Dockerfile | 2 +- docker/injected.Dockerfile | 51 ------ .../scripts/adder-collator/build-injected.sh | 13 ++ docker/scripts/adder-collator/test-build.sh | 23 +++ docker/scripts/build-injected.sh | 100 ++++++++++++ docker/scripts/entrypoint.sh | 18 +++ docker/scripts/malus/build-injected.sh | 14 ++ docker/scripts/malus/test-build.sh | 19 +++ .../polkadot-parachain/build-injected.sh | 15 ++ .../scripts/polkadot-parachain/test-build.sh | 19 +++ ...polkadot-parachain_build-injected-image.sh | 2 +- docker/scripts/polkadot/build-injected.sh | 13 ++ docker/scripts/polkadot/test-build.sh | 18 +++ docker/scripts/staking-miner/README.md 
| 37 +++++ .../scripts/staking-miner/build-injected.sh | 13 ++ docker/scripts/staking-miner/build.sh | 13 ++ .../staking-miner_Dockerfile.README.md | 3 + .../staking-miner_builder.Dockerfile | 43 +++++ docker/scripts/staking-miner/test-build.sh | 18 +++ {cumulus/docs => docs}/container.md | 2 +- {polkadot/doc => docs}/docker.md | 2 +- .../workflows/release-40_publish-rc-image.yml | 132 ---------------- .../release-51_publish-docker-manual.yml | 51 ------ polkadot/utils/staking-miner/README.md | 2 +- 55 files changed, 784 insertions(+), 677 deletions(-) rename {.gitlab => .github/scripts}/common/lib.sh (76%) rename {cumulus/.github => .github}/workflows/release-50_publish-docker.yml (59%) delete mode 100644 cumulus/docker/parachain-registrar.dockerfile delete mode 100644 cumulus/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile delete mode 100644 cumulus/docker/polkadot-parachain_builder.Containerfile delete mode 100644 cumulus/docker/test-parachain-collator.dockerfile delete mode 100644 cumulus/docker/test-parachain_injected.Dockerfile delete mode 100644 docker/docker-compose.yml create mode 100644 docker/dockerfiles/binary_injected.Dockerfile rename docker/{ => dockerfiles}/collator_injected.Dockerfile (95%) rename {cumulus/docker => docker/dockerfiles}/docker-compose.yml (89%) rename docker/{ => dockerfiles}/malus_injected.Dockerfile (100%) rename docker/{ => dockerfiles}/parachain-registrar.dockerfile (89%) rename docker/{ => dockerfiles/polkadot-parachain}/polkadot-parachain-debug_unsigned_injected.Dockerfile (93%) rename docker/{ => dockerfiles/polkadot-parachain}/polkadot-parachain_builder.Containerfile (89%) rename cumulus/docker/injected.Dockerfile => docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile (68%) create mode 100644 docker/dockerfiles/polkadot/README.md create mode 100644 docker/dockerfiles/polkadot/docker-compose-local.yml create mode 100644 docker/dockerfiles/polkadot/docker-compose.yml create mode 100644 docker/dockerfiles/polkadot/polkadot_Dockerfile.README.md create mode 100644 docker/dockerfiles/polkadot/polkadot_builder.Dockerfile create mode 100644 docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile rename docker/{ => dockerfiles/polkadot}/polkadot_injected_debug.Dockerfile (94%) rename docker/{ => dockerfiles/polkadot}/polkadot_injected_release.Dockerfile (95%) rename docker/{ => dockerfiles}/staking-miner/staking-miner_builder.Dockerfile (100%) rename docker/{ => dockerfiles}/staking-miner/staking-miner_injected.Dockerfile (100%) rename docker/{ => dockerfiles}/substrate_injected.Dockerfile (100%) rename docker/{ => dockerfiles}/test-parachain-collator.dockerfile (96%) rename docker/{ => dockerfiles}/test-parachain_injected.Dockerfile (95%) delete mode 100644 docker/injected.Dockerfile create mode 100755 docker/scripts/adder-collator/build-injected.sh create mode 100755 docker/scripts/adder-collator/test-build.sh create mode 100755 docker/scripts/build-injected.sh create mode 100755 docker/scripts/entrypoint.sh create mode 100755 docker/scripts/malus/build-injected.sh create mode 100755 docker/scripts/malus/test-build.sh create mode 100755 docker/scripts/polkadot-parachain/build-injected.sh create mode 100755 docker/scripts/polkadot-parachain/test-build.sh rename cumulus/docker/scripts/build-injected-image.sh => docker/scripts/polkadot-parachain_build-injected-image.sh (70%) create mode 100755 docker/scripts/polkadot/build-injected.sh create mode 100755 docker/scripts/polkadot/test-build.sh create mode 100644 
docker/scripts/staking-miner/README.md create mode 100755 docker/scripts/staking-miner/build-injected.sh create mode 100755 docker/scripts/staking-miner/build.sh create mode 100644 docker/scripts/staking-miner/staking-miner_Dockerfile.README.md create mode 100644 docker/scripts/staking-miner/staking-miner_builder.Dockerfile create mode 100755 docker/scripts/staking-miner/test-build.sh rename {cumulus/docs => docs}/container.md (96%) rename {polkadot/doc => docs}/docker.md (98%) delete mode 100644 polkadot/.github/workflows/release-40_publish-rc-image.yml delete mode 100644 polkadot/.github/workflows/release-51_publish-docker-manual.yml diff --git a/.gitlab/common/lib.sh b/.github/scripts/common/lib.sh similarity index 76% rename from .gitlab/common/lib.sh rename to .github/scripts/common/lib.sh index ba5b17148728..b0f9cb32063a 100755 --- a/.gitlab/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -96,7 +96,7 @@ structure_message() { # access_token: see https://matrix.org/docs/guides/client-server-api/ # Usage: send_message $body (json formatted) $room_id $access_token send_message() { -curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" + curl -XPOST -d "$1" "https://m.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" } # Pretty-printing functions @@ -193,3 +193,74 @@ check_bootnode(){ echo " Bootnode appears unreachable" return 1 } + +# Assumes the ENV are set: +# - RELEASE_ID +# - GITHUB_TOKEN +# - REPO in the form paritytech/polkadot +fetch_release_artifacts() { + echo "Release ID : $RELEASE_ID" + echo "Repo : $REPO" + echo "Binary : $BINARY" + + curl -L -s \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json + + # Get Asset ids + ids=($(jq -r '.assets[].id' < release.json )) + count=$(jq '.assets|length' < release.json ) + + # Fetch artifacts + mkdir -p "./release-artifacts/${BINARY}" + pushd "./release-artifacts/${BINARY}" > /dev/null + + iter=1 + for id in "${ids[@]}" + do + echo " - $iter/$count: downloading asset id: $id..." 
+ curl -s -OJ -L -H "Accept: application/octet-stream" \ + -H "Authorization: Token ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${REPO}/releases/assets/$id" + iter=$((iter + 1)) + done + + pwd + ls -al --color + popd > /dev/null +} + +# Check the checksum for a given binary +function check_sha256() { + echo "Checking SHA256 for $1" + shasum -qc $1.sha256 +} + +# Import GPG keys of the release team members +# This is done in parallel as it can take a while sometimes +function import_gpg_keys() { + GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} + SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" + WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2" + EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" + MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF" + MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" + + echo "Importing GPG keys from $GPG_KEYSERVER in parallel" + for key in $SEC $WILL $EGOR $MARA $MORGAN; do + ( + echo "Importing GPG key $key" + gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key + echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust; + ) & + done + wait +} + +# Check the GPG signature for a given binary +function check_gpg() { + echo "Checking GPG Signature for $1" + gpg --no-tty --verify -q $1.asc $1 +} diff --git a/cumulus/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml similarity index 59% rename from cumulus/.github/workflows/release-50_publish-docker.yml rename to .github/workflows/release-50_publish-docker.yml index 6ad943c3903c..0e466f26891c 100644 --- a/cumulus/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -7,9 +7,10 @@ name: Release - Publish Docker Image # image and publishes it. 
on: - release: - types: - - published + #TODO: activate automated run later + # release: + # types: + # - published workflow_dispatch: inputs: release_id: @@ -39,6 +40,18 @@ on: required: true type: string default: parity + binary: + description: Binary to be published + required: true + default: polkadot + type: choice + options: + - polkadot + - staking-miner + - polkadot-parachain + +permissions: + contents: write env: RELEASE_ID: ${{ inputs.release_id }} @@ -47,8 +60,8 @@ env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} REPO: ${{ github.repository }} - BINARY: polkadot-parachain - EVENT_ACTION: ${{ github.event.action }} + BINARY: ${{ inputs.binary }} + # EVENT_ACTION: ${{ github.event.action }} EVENT_NAME: ${{ github.event_name }} IMAGE_TYPE: ${{ inputs.image_type }} @@ -58,59 +71,36 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - - name: Prepare temp folder - run: | - TMP=$(mktemp -d) - echo "TMP=$TMP" >> "$GITHUB_ENV" - pwd - ls -al "$TMP" + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - - name: Fetch lib.sh from polkadot repo - working-directory: ${{ env.TMP }} - run: | - curl -O -L \ - -H "Accept: application/vnd.github.v3.raw" \ - https://raw.githubusercontent.com/paritytech/polkadot/master/scripts/ci/common/lib.sh - - chmod a+x lib.sh - ls -al - - - name: Fetch release artifacts based on final release tag + #TODO: this step will be needed when automated triggering will work #this step runs only if the workflow is triggered automatically when new release is published - if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} - run: | - mkdir -p release-artifacts && cd release-artifacts - - for f in $BINARY $BINARY.asc $BINARY.sha256; do - URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" - echo " - Fetching $f from $URL" - wget "$URL" -O "$f" - done - chmod a+x $BINARY - cp -f ${TMP}/lib.sh . - ls -al + # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} + # run: | + # mkdir -p release-artifacts && cd release-artifacts + + # for f in $BINARY $BINARY.asc $BINARY.sha256; do + # URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" + # echo " - Fetching $f from $URL" + # wget "$URL" -O "$f" + # done + # chmod a+x $BINARY + # ls -al - name: Fetch rc artifacts or release artifacts based on release id #this step runs only if the workflow is triggered manually if: ${{ env.EVENT_NAME == 'workflow_dispatch' }} run: | - . ${TMP}/lib.sh + . 
./.github/scripts/common/lib.sh fetch_release_artifacts - chmod a+x release-artifacts/$BINARY - ls -al - - cp -f ${TMP}/lib.sh release-artifacts/ - - name: Cache the artifacts uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 with: - key: artifacts-${{ github.sha }} + key: artifacts-${{ env.BINARY }}-${{ github.sha }} path: | - ./release-artifacts/**/* + ./release-artifacts/${{ env.BINARY }}/**/* build-container: runs-on: ubuntu-latest @@ -118,40 +108,31 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 - name: Get artifacts from cache uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 with: - key: artifacts-${{ github.sha }} + key: artifacts-${{ env.BINARY }}-${{ github.sha }} fail-on-cache-miss: true path: | - ./release-artifacts/**/* + ./release-artifacts/${{ env.BINARY }}/**/* - name: Check sha256 ${{ env.BINARY }} - working-directory: ./release-artifacts + working-directory: ./release-artifacts/${{ env.BINARY }} run: | - . ./lib.sh + . ../../.github/scripts/common/lib.sh echo "Checking binary $BINARY" check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - working-directory: ./release-artifacts + working-directory: ./release-artifacts/${{ env.BINARY }} run: | - . ./lib.sh + . ../../.github/scripts/common/lib.sh import_gpg_keys check_gpg $BINARY - - name: Build Injected Container image for ${{ env.BINARY }} - env: - IMAGE_NAME: ${{ env.BINARY }} - OWNER: ${{ env.DOCKER_OWNER }} - run: | - ls -al - echo "Building container for $BINARY" - ./docker/scripts/build-injected-image.sh - - name: Fetch rc commit and tag if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs @@ -167,14 +148,43 @@ jobs: echo "No tag, doing without" - name: Fetch release tags - if: ${{ env.IMAGE_TYPE == 'release' || env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} + working-directory: ./release-artifacts/${{ env.BINARY }} + if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs run: | - VERSION=$(docker run --pull never --rm $DOCKER_OWNER/$BINARY --version | awk '{ print $2 }' ) + chmod a+rx $BINARY + VERSION=$(./$BINARY --version | awk '{ print $2 }' ) release=$( echo $VERSION | cut -f1 -d- ) echo "tag=latest" >> $GITHUB_OUTPUT echo "release=${release}" >> $GITHUB_OUTPUT + - name: Build Injected Container image for polkadot/staking-miner + if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'staking-miner' }} + env: + ARTIFACTS_FOLDER: ./release-artifacts + IMAGE_NAME: ${{ env.BINARY }} + OWNER: ${{ env.DOCKER_OWNER }} + TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} + run: | + ls -al + echo "Building container for $BINARY" + ./docker/scripts/build-injected.sh + + - name: Build Injected Container image for polkadot-parachain + if: ${{ env.BINARY == 'polkadot-parachain' }} + env: + ARTIFACTS_FOLDER: ./release-artifacts + IMAGE_NAME: ${{ env.BINARY }} + OWNER: ${{ env.DOCKER_OWNER }} + DOCKERFILE: docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile + TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} + run: | + ls -al + mkdir -p $ARTIFACTS_FOLDER/specs + cp cumulus/parachains/chain-specs/*.json $ARTIFACTS_FOLDER/specs + + echo "Building container for $BINARY" + ./docker/scripts/build-injected.sh - name: Login to Dockerhub uses: 
docker/login-action@v2 @@ -182,21 +192,11 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Tag and Push Container image for ${{ env.BINARY }} + - name: Push Container image for ${{ env.BINARY }} id: docker_push - env: - TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} run: | - TAGS=${TAGS[@]:-latest} - IFS=',' read -r -a TAG_ARRAY <<< "$TAGS" - - echo "The image ${BINARY} will be tagged with ${TAG_ARRAY[*]}" - for TAG in "${TAG_ARRAY[@]}"; do - $ENGINE tag ${DOCKER_OWNER}/${BINARY} ${DOCKER_OWNER}/${BINARY}:${TAG} - $ENGINE push ${DOCKER_OWNER}/${BINARY}:${TAG} - done - $ENGINE images | grep ${BINARY} + $ENGINE push --all-tags ${REGISTRY}/${DOCKER_OWNER}/${BINARY} - name: Check version for the published image for ${{ env.BINARY }} env: diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 20fed5df3dfd..2d74187cadfa 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -328,7 +328,7 @@ build-linux-substrate: cut -d ' ' -f 2 | tee ./artifacts/substrate/VERSION; fi - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 - - cp -r ./docker/substrate_injected.Dockerfile ./artifacts/substrate/ + - cp -r ./docker/dockerfiles/substrate_injected.Dockerfile ./artifacts/substrate/ # - printf '\n# building node-template\n\n' # - ./scripts/ci/node-template-release.sh ./artifacts/substrate/substrate-node-template.tar.gz diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index ed18082344f0..341d3ac2a862 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -35,7 +35,7 @@ build-push-image-polkadot-parachain-debug: - job: build-linux-stable-cumulus artifacts: true variables: - DOCKERFILE: "docker/polkadot-parachain-debug_unsigned_injected.Dockerfile" + DOCKERFILE: "docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/polkadot-parachain-debug" build-push-image-test-parachain: @@ -48,7 +48,7 @@ build-push-image-test-parachain: - job: build-test-parachain artifacts: true variables: - DOCKERFILE: "docker/test-parachain_injected.Dockerfile" + DOCKERFILE: "docker/dockerfiles/test-parachain_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/test-parachain" # publish-s3: # stage: publish @@ -114,7 +114,7 @@ build-push-image-polkadot-debug: - job: build-linux-stable artifacts: true variables: - DOCKERFILE: "docker/polkadot_injected_debug.Dockerfile" + DOCKERFILE: "docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile" IMAGE_NAME: "docker.io/paritypr/polkadot-debug" build-push-image-colander: @@ -127,7 +127,7 @@ build-push-image-colander: - job: build-test-collators artifacts: true variables: - DOCKERFILE: "docker/collator_injected.Dockerfile" + DOCKERFILE: "docker/dockerfiles/collator_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/colander" build-push-image-malus: @@ -140,7 +140,7 @@ build-push-image-malus: - job: build-malus artifacts: true variables: - DOCKERFILE: "docker/malus_injected.Dockerfile" + DOCKERFILE: "docker/dockerfiles/malus_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/malus" build-push-image-substrate-pr: @@ -153,7 +153,7 @@ build-push-image-substrate-pr: - job: build-linux-substrate artifacts: true variables: - DOCKERFILE: "docker/substrate_injected.Dockerfile" + DOCKERFILE: "docker/dockerfiles/substrate_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/substrate" # old 
way @@ -201,7 +201,7 @@ build-push-image-substrate-pr: # GIT_STRATEGY: none # DOCKER_USER: ${PARITYPR_USER} # DOCKER_PASS: ${PARITYPR_PASS} -# # scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile +# # docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile # DOCKERFILE: polkadot_injected_debug.Dockerfile # IMAGE_NAME: docker.io/paritypr/polkadot-debug # needs: @@ -230,7 +230,7 @@ build-push-image-substrate-pr: # GIT_STRATEGY: none # DOCKER_USER: ${PARITYPR_USER} # DOCKER_PASS: ${PARITYPR_PASS} -# # scripts/ci/dockerfiles/collator_injected.Dockerfile +# # docker/dockerfiles/collator_injected.Dockerfile # DOCKERFILE: collator_injected.Dockerfile # IMAGE_NAME: docker.io/paritypr/colander # needs: @@ -258,7 +258,7 @@ build-push-image-substrate-pr: # GIT_STRATEGY: none # DOCKER_USER: ${PARITYPR_USER} # DOCKER_PASS: ${PARITYPR_PASS} -# # scripts/ci/dockerfiles/malus_injected.Dockerfile +# # docker/dockerfiles/malus_injected.Dockerfile # DOCKERFILE: malus_injected.Dockerfile # IMAGE_NAME: docker.io/paritypr/malus # needs: diff --git a/.gitlab/test_deterministic_wasm.sh b/.gitlab/test_deterministic_wasm.sh index 4f1d2981ff2b..fac28fce1f64 100755 --- a/.gitlab/test_deterministic_wasm.sh +++ b/.gitlab/test_deterministic_wasm.sh @@ -2,7 +2,7 @@ set -e #shellcheck source=../common/lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/common/lib.sh" +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../.github/scripts/common/lib.sh" # build runtime WASM_BUILD_NO_COLOR=1 cargo build -q --locked --release -p staging-kusama-runtime -p polkadot-runtime -p westend-runtime diff --git a/cumulus/docker/parachain-registrar.dockerfile b/cumulus/docker/parachain-registrar.dockerfile deleted file mode 100644 index f7d77454a2b9..000000000000 --- a/cumulus/docker/parachain-registrar.dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM node:latest AS pjs - -# It would be great to depend on a more stable tag, but we need some -# as-yet-unreleased features. -RUN yarn global add @polkadot/api-cli@0.10.0-beta.14 - -ENTRYPOINT [ "polkadot-js-api" ] -CMD [ "--version" ] - -# To use the pjs build stage to access the blockchain from the host machine: -# -# docker build -f docker/parachain-registrar.dockerfile --target pjs -t parachain-registrar:pjs . -# alias pjs='docker run --rm --net cumulus_testing_net parachain-registrar:pjs --ws ws://172.28.1.1:9944' -# -# Then, as long as the chain is running, you can use the polkadot-js-api CLI like: -# -# pjs query.sudo.key - -FROM pjs -RUN apt-get update && apt-get install curl netcat -y && \ - curl -sSo /wait-for-it.sh https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh && \ - chmod +x /wait-for-it.sh -# the only thing left to do is to actually run the transaction. 
-COPY ./docker/scripts/register_para.sh /usr/bin -# unset the previous stage's entrypoint -ENTRYPOINT [] -CMD [ "/usr/bin/register_para.sh" ] diff --git a/cumulus/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile b/cumulus/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile deleted file mode 100644 index a2e32049f5bb..000000000000 --- a/cumulus/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Cumulus, the Polkadot collator." \ - io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/scripts/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/cumulus/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates \ - curl && \ - # apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ - # add user and link ~/.local/share/polkadot-parachain to /data - useradd -m -u 10000 -U -s /bin/sh -d /polkadot-parachain polkadot-parachain && \ - mkdir -p /data /polkadot-parachain/.local/share && \ - chown -R polkadot-parachain:polkadot-parachain /data && \ - ln -s /data /polkadot-parachain/.local/share/polkadot-parachain && \ - mkdir -p /specs - -# add polkadot-parachain binary to the docker image -COPY ./artifacts/polkadot-parachain /usr/local/bin -COPY ./parachains/chain-specs/*.json /specs/ - -USER polkadot-parachain - -# check if executable works in this container -RUN /usr/local/bin/polkadot-parachain --version - -EXPOSE 30333 9933 9944 -VOLUME ["/polkadot-parachain"] - -ENTRYPOINT ["/usr/local/bin/polkadot-parachain"] diff --git a/cumulus/docker/polkadot-parachain_builder.Containerfile b/cumulus/docker/polkadot-parachain_builder.Containerfile deleted file mode 100644 index 159bcb323693..000000000000 --- a/cumulus/docker/polkadot-parachain_builder.Containerfile +++ /dev/null @@ -1,36 +0,0 @@ -# This file is sourced from https://github.com/paritytech/polkadot/blob/master/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile -# This is the build stage for polkadot-parachain. Here we create the binary in a temporary image. -FROM docker.io/paritytech/ci-linux:production as builder - -WORKDIR /cumulus -COPY . /cumulus - -RUN cargo build --release --locked -p polkadot-parachain - -# This is the 2nd stage: a very small image where we copy the Polkadot binary." 
-FROM docker.io/library/ubuntu:20.04 - -LABEL io.parity.image.type="builder" \ - io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.description="Multistage Docker image for polkadot-parachain" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot-parachain_builder.Dockerfile" \ - io.parity.image.documentation="https://github.com/paritytech/cumulus" - -COPY --from=builder /cumulus/target/release/polkadot-parachain /usr/local/bin - -RUN useradd -m -u 1000 -U -s /bin/sh -d /cumulus polkadot-parachain && \ - mkdir -p /data /cumulus/.local/share && \ - chown -R polkadot-parachain:polkadot-parachain /data && \ - ln -s /data /cumulus/.local/share/polkadot-parachain && \ -# unclutter and minimize the attack surface - rm -rf /usr/bin /usr/sbin && \ -# check if executable works in this container - /usr/local/bin/polkadot-parachain --version - -USER polkadot-parachain - -EXPOSE 30333 9933 9944 9615 -VOLUME ["/data"] - -ENTRYPOINT ["/usr/local/bin/polkadot-parachain"] diff --git a/cumulus/docker/test-parachain-collator.dockerfile b/cumulus/docker/test-parachain-collator.dockerfile deleted file mode 100644 index 9c2d8fbe5818..000000000000 --- a/cumulus/docker/test-parachain-collator.dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -# This file is sourced from https://github.com/paritytech/polkadot/blob/master/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile -FROM docker.io/paritytech/ci-linux:production as builder - -WORKDIR /cumulus -COPY . /cumulus - -RUN cargo build --release --locked -p polkadot-parachain - -# the collator stage is normally built once, cached, and then ignored, but can -# be specified with the --target build flag. This adds some extra tooling to the -# image, which is required for a launcher script. The script simply adds two -# arguments to the list passed in: -# -# --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/PEER_ID -# -# with the appropriate ip and ID for both Alice and Bob -FROM debian:buster-slim as collator -RUN apt-get update && apt-get install jq curl bash -y && \ - curl -sSo /wait-for-it.sh https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh && \ - chmod +x /wait-for-it.sh && \ - curl -sL https://deb.nodesource.com/setup_12.x | bash - && \ - apt-get install -y nodejs && \ - npm install --global yarn && \ - yarn global add @polkadot/api-cli@0.10.0-beta.14 -COPY --from=builder \ - /paritytech/cumulus/target/release/polkadot-parachain /usr/bin -COPY ./docker/scripts/inject_bootnodes.sh /usr/bin -CMD ["/usr/bin/inject_bootnodes.sh"] -COPY ./docker/scripts/healthcheck.sh /usr/bin/ -HEALTHCHECK --interval=300s --timeout=75s --start-period=30s --retries=3 \ - CMD ["/usr/bin/healthcheck.sh"] - -# the runtime stage is normally built once, cached, and ignored, but can be -# specified with the --target build flag. 
This just preserves one of the builder's -# outputs, which can then be moved into a volume at runtime -FROM debian:buster-slim as runtime -COPY --from=builder \ - /paritytech/cumulus/target/release/wbuild/cumulus-test-parachain-runtime/cumulus_test_parachain_runtime.compact.wasm \ - /var/opt/ -CMD ["cp", "-v", "/var/opt/cumulus_test_parachain_runtime.compact.wasm", "/runtime/"] - -FROM debian:buster-slim -COPY --from=builder \ - /paritytech/cumulus/target/release/polkadot-parachain /usr/bin - -CMD ["/usr/bin/polkadot-parachain"] diff --git a/cumulus/docker/test-parachain_injected.Dockerfile b/cumulus/docker/test-parachain_injected.Dockerfile deleted file mode 100644 index 6056c504604e..000000000000 --- a/cumulus/docker/test-parachain_injected.Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Test parachain for Zombienet" \ - io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/docker/test-parachain_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/cumulus/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates \ - curl && \ - # apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ - # add user and link ~/.local/share/test-parachain to /data - useradd -m -u 10000 -U -s /bin/sh -d /test-parachain test-parachain && \ - mkdir -p /data /test-parachain/.local/share && \ - chown -R test-parachain:test-parachain /data && \ - ln -s /data /test-parachain/.local/share/test-parachain && \ - mkdir -p /specs - -# add test-parachain binary to the docker image -COPY ./artifacts/test-parachain /usr/local/bin -COPY ./parachains/chain-specs/*.json /specs/ - -USER test-parachain - -# check if executable works in this container -RUN /usr/local/bin/test-parachain --version - -EXPOSE 30333 9933 9944 -VOLUME ["/test-parachain"] - -ENTRYPOINT ["/usr/local/bin/test-parachain"] diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml deleted file mode 100644 index 8344ad43bb4c..000000000000 --- a/docker/docker-compose.yml +++ /dev/null @@ -1,129 +0,0 @@ -version: '3.7' -services: - node_alice: - image: "polkadot:${BRANCH:-cumulus-branch}" - ports: - - "30333:30333" - - "9933:9933" - - "9944:9944" - volumes: - - "polkadot-data-alice:/data" - - type: bind - source: ./test/parachain/chain-specs/polkadot_chainspec.json - target: /chainspec.json - read_only: true - command: > - polkadot - --chain=/chainspec.json - --base-path=/data - --port 30333 - --rpc-port 9933 - --ws-port 9944 - --rpc-external - --rpc-cors all - --ws-external - --alice - networks: - testing_net: - ipv4_address: 172.28.1.1 - aliases: - - alice - - node_bob: - image: "polkadot:${BRANCH:-cumulus-branch}" - ports: - - "30344:30333" - - "9935:9933" - - "9945:9944" - volumes: - - "polkadot-data-bob:/data" - - type: bind - source: ./test/parachain/chain-specs/polkadot_chainspec.json - target: /chainspec.json - read_only: true - command: > - polkadot - --chain=/chainspec.json - --base-path=/data - --port 30333 - --rpc-port 9933 - 
--ws-port 9944 - --rpc-external - --ws-external - --rpc-cors all - --bob - networks: - testing_net: - ipv4_address: 172.28.1.2 - aliases: - - bob - - genesis_state: - build: - context: . - dockerfile: ./docker/test-parachain-collator.dockerfile - image: "ctpc:latest" - volumes: - - "genesis-state:/data" - command: > - polkadot-parachain - export-genesis-state - /data/genesis-state - - collator: - build: - context: . - dockerfile: ./docker/test-parachain-collator.dockerfile - target: collator - image: "ctpc:collator" - volumes: - - "collator-data:/data" - depends_on: - - node_alice - - node_bob - command: > - inject_bootnodes.sh - --base-path=/data - networks: - testing_net: - - runtime: - build: - context: . - dockerfile: ./docker/test-parachain-collator.dockerfile - target: runtime - image: "ctpc:runtime" - volumes: - - "parachain-runtime:/runtime" - - - registrar: - build: - context: . - dockerfile: ./docker/parachain-registrar.dockerfile - image: para-reg:latest - volumes: - - "genesis-state:/genesis" - - "parachain-runtime:/runtime" - depends_on: - - node_alice - - runtime - - genesis_state - networks: - testing_net: - - -volumes: - polkadot-data-alice: - polkadot-data-bob: - collator-data: - genesis-state: - parachain-runtime: - - -networks: - testing_net: - ipam: - driver: default - config: - - subnet: 172.28.0.0/16 diff --git a/docker/dockerfiles/binary_injected.Dockerfile b/docker/dockerfiles/binary_injected.Dockerfile new file mode 100644 index 000000000000..ac1fd5317c67 --- /dev/null +++ b/docker/dockerfiles/binary_injected.Dockerfile @@ -0,0 +1,48 @@ +FROM docker.io/parity/base-bin + +# This file allows building a Generic container image +# based on one or multiple pre-built Linux binaries. +# Some defaults are set to polkadot but all can be overriden. + +SHELL ["/bin/bash", "-c"] + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +# That can be a single one or a comma separated list +ARG BINARY=polkadot + +ARG BIN_FOLDER=. +ARG DOC_URL=https://github.com/paritytech/polkadot-sdk +ARG DESCRIPTION="Polkadot: a platform for web3" +ARG AUTHORS="devops-team@parity.io" +ARG VENDOR="Parity Technologies" + +LABEL io.parity.image.authors=${AUTHORS} \ + io.parity.image.vendor="${VENDOR}" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="${DOC_URL}" \ + io.parity.image.description="${DESCRIPTION}" \ + io.parity.image.source="https://github.com/paritytech/polkadot-sdk/blob/${VCS_REF}/docker/dockerfiles/binary_injected.Dockerfile" + +USER root +WORKDIR /app + +# add polkadot binary to docker image +# sample for polkadot: COPY ./polkadot ./polkadot-*-worker /usr/local/bin/ +COPY entrypoint.sh . 
+COPY "bin/*" "/usr/local/bin/" +RUN chmod -R a+rx "/usr/local/bin" + +USER parity +ENV BINARY=${BINARY} + +# ENTRYPOINT +ENTRYPOINT ["/app/entrypoint.sh"] + +# We call the help by default +CMD ["--help"] diff --git a/docker/collator_injected.Dockerfile b/docker/dockerfiles/collator_injected.Dockerfile similarity index 95% rename from docker/collator_injected.Dockerfile rename to docker/dockerfiles/collator_injected.Dockerfile index 6472c240f332..0c9ea1e0ca83 100644 --- a/docker/collator_injected.Dockerfile +++ b/docker/dockerfiles/collator_injected.Dockerfile @@ -10,7 +10,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="${IMAGE_NAME}" \ io.parity.image.description="Injected adder-collator Docker image" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/collator_injected.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/docker/dockerfiles/collator_injected.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/polkadot/" diff --git a/cumulus/docker/docker-compose.yml b/docker/dockerfiles/docker-compose.yml similarity index 89% rename from cumulus/docker/docker-compose.yml rename to docker/dockerfiles/docker-compose.yml index 8344ad43bb4c..8dc8540353fe 100644 --- a/cumulus/docker/docker-compose.yml +++ b/docker/dockerfiles/docker-compose.yml @@ -61,7 +61,7 @@ services: genesis_state: build: context: . - dockerfile: ./docker/test-parachain-collator.dockerfile + dockerfile: ./docker/dockerfiles/test-parachain-collator.dockerfile image: "ctpc:latest" volumes: - "genesis-state:/data" @@ -73,7 +73,7 @@ services: collator: build: context: . - dockerfile: ./docker/test-parachain-collator.dockerfile + dockerfile: ./docker/dockerfiles/test-parachain-collator.dockerfile target: collator image: "ctpc:collator" volumes: @@ -90,7 +90,7 @@ services: runtime: build: context: . - dockerfile: ./docker/test-parachain-collator.dockerfile + dockerfile: ./docker/dockerfiles/test-parachain-collator.dockerfile target: runtime image: "ctpc:runtime" volumes: @@ -100,7 +100,7 @@ services: registrar: build: context: . - dockerfile: ./docker/parachain-registrar.dockerfile + dockerfile: ./docker/dockerfiles/parachain-registrar.dockerfile image: para-reg:latest volumes: - "genesis-state:/genesis" diff --git a/docker/malus_injected.Dockerfile b/docker/dockerfiles/malus_injected.Dockerfile similarity index 100% rename from docker/malus_injected.Dockerfile rename to docker/dockerfiles/malus_injected.Dockerfile diff --git a/docker/parachain-registrar.dockerfile b/docker/dockerfiles/parachain-registrar.dockerfile similarity index 89% rename from docker/parachain-registrar.dockerfile rename to docker/dockerfiles/parachain-registrar.dockerfile index f7d77454a2b9..00908395101f 100644 --- a/docker/parachain-registrar.dockerfile +++ b/docker/dockerfiles/parachain-registrar.dockerfile @@ -9,7 +9,7 @@ CMD [ "--version" ] # To use the pjs build stage to access the blockchain from the host machine: # -# docker build -f docker/parachain-registrar.dockerfile --target pjs -t parachain-registrar:pjs . +# docker build -f docker/dockerfiles/parachain-registrar.dockerfile --target pjs -t parachain-registrar:pjs . 
# alias pjs='docker run --rm --net cumulus_testing_net parachain-registrar:pjs --ws ws://172.28.1.1:9944' # # Then, as long as the chain is running, you can use the polkadot-js-api CLI like: diff --git a/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile b/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile similarity index 93% rename from docker/polkadot-parachain-debug_unsigned_injected.Dockerfile rename to docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile index e77563b8ebf2..7a2202d9c52b 100644 --- a/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile +++ b/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile @@ -9,7 +9,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="${IMAGE_NAME}" \ io.parity.image.description="Cumulus, the Polkadot collator." \ - io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/scripts/docker/polkadot-parachain-debug_unsigned_injected.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/cumulus/" diff --git a/docker/polkadot-parachain_builder.Containerfile b/docker/dockerfiles/polkadot-parachain/polkadot-parachain_builder.Containerfile similarity index 89% rename from docker/polkadot-parachain_builder.Containerfile rename to docker/dockerfiles/polkadot-parachain/polkadot-parachain_builder.Containerfile index 159bcb323693..4d110d6af472 100644 --- a/docker/polkadot-parachain_builder.Containerfile +++ b/docker/dockerfiles/polkadot-parachain/polkadot-parachain_builder.Containerfile @@ -1,4 +1,4 @@ -# This file is sourced from https://github.com/paritytech/polkadot/blob/master/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile +# This file is sourced from https://github.com/paritytech/polkadot/blob/master/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile # This is the build stage for polkadot-parachain. Here we create the binary in a temporary image. 
FROM docker.io/paritytech/ci-linux:production as builder @@ -14,7 +14,7 @@ LABEL io.parity.image.type="builder" \ io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.description="Multistage Docker image for polkadot-parachain" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot-parachain_builder.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/docker/dockerfiles/polkadot-parachain/polkadot-parachain_builder.Dockerfile" \ io.parity.image.documentation="https://github.com/paritytech/cumulus" COPY --from=builder /cumulus/target/release/polkadot-parachain /usr/local/bin diff --git a/cumulus/docker/injected.Dockerfile b/docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile similarity index 68% rename from cumulus/docker/injected.Dockerfile rename to docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile index f9b11f022e75..16bd0f4cf3c5 100644 --- a/cumulus/docker/injected.Dockerfile +++ b/docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile @@ -9,10 +9,10 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="${IMAGE_NAME}" \ io.parity.image.description="Cumulus, the Polkadot collator." \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/docker/Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot-sdk/blob/${VCS_REF}/docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/cumulus/" + io.parity.image.documentation="https://github.com/paritytech/polkadot-sdk/" # show backtraces ENV RUST_BACKTRACE 1 @@ -22,8 +22,10 @@ USER root RUN mkdir -p /specs # add polkadot-parachain binary to the docker image -COPY ./release-artifacts/* /usr/local/bin -COPY ./parachains/chain-specs/*.json /specs/ +COPY bin/* /usr/local/bin/ +COPY specs/* /specs/ + +RUN chmod -R a+rx "/usr/local/bin" USER parity diff --git a/docker/dockerfiles/polkadot/README.md b/docker/dockerfiles/polkadot/README.md new file mode 100644 index 000000000000..e331d8984c2c --- /dev/null +++ b/docker/dockerfiles/polkadot/README.md @@ -0,0 +1,9 @@ +# Self built Docker image + +The Polkadot repo contains several options to build Docker images for Polkadot. + +This folder contains a self-contained image that does not require a Linux pre-built binary. + +Instead, building the image is possible on any host having docker installed and will +build Polkadot inside Docker. That also means that no Rust toolchain is required on the host +machine for the build to succeed. 
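As a rough sketch of the self-contained build this README describes (run from the repository root; the `parity/polkadot:local` tag below is only an illustrative example, while the Dockerfile path is the one introduced by this patch):

    # build the polkadot binary and the final image entirely inside Docker,
    # so no Rust toolchain is needed on the host
    docker build \
        -f docker/dockerfiles/polkadot/polkadot_builder.Dockerfile \
        -t parity/polkadot:local \
        .

    # sanity-check the freshly built image; its entrypoint is the polkadot binary
    docker run --rm parity/polkadot:local --version

Expect the first build to take a while, since the builder stage runs a full `cargo build --locked --release` over the workspace.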
diff --git a/docker/dockerfiles/polkadot/docker-compose-local.yml b/docker/dockerfiles/polkadot/docker-compose-local.yml new file mode 100644 index 000000000000..1ff3a1ccaac2 --- /dev/null +++ b/docker/dockerfiles/polkadot/docker-compose-local.yml @@ -0,0 +1,50 @@ +version: '3' +services: + node_alice: + ports: + - "30333:30333" + - "9933:9933" + - "9944:9944" + - "9615:9615" + image: parity/polkadot:latest + volumes: + - "polkadot-data-alice:/data" + command: | + --chain=polkadot-local + --alice + -d /data + --node-key 0000000000000000000000000000000000000000000000000000000000000001 + networks: + testing_net: + ipv4_address: 172.28.1.1 + + node_bob: + ports: + - "30344:30333" + - "9935:9933" + - "9945:9944" + - "29615:9615" + image: parity/polkadot:latest + volumes: + - "polkadot-data-bob:/data" + links: + - "node_alice:alice" + command: | + --chain=polkadot-local + --bob + -d /data + --bootnodes '/ip4/172.28.1.1/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR' + networks: + testing_net: + ipv4_address: 172.28.1.2 + +volumes: + polkadot-data-alice: + polkadot-data-bob: + +networks: + testing_net: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 diff --git a/docker/dockerfiles/polkadot/docker-compose.yml b/docker/dockerfiles/polkadot/docker-compose.yml new file mode 100644 index 000000000000..524b1164796a --- /dev/null +++ b/docker/dockerfiles/polkadot/docker-compose.yml @@ -0,0 +1,22 @@ +version: '3' +services: + polkadot: + image: parity/polkadot:latest + + ports: + - "127.0.0.1:30333:30333/tcp" + - "127.0.0.1:9933:9933/tcp" + - "127.0.0.1:9944:9944/tcp" + - "127.0.0.1:9615:9615/tcp" + + volumes: + - "polkadot-data:/data" + + command: | + --unsafe-rpc-external + --unsafe-ws-external + --rpc-cors all + --prometheus-external + +volumes: + polkadot-data: diff --git a/docker/dockerfiles/polkadot/polkadot_Dockerfile.README.md b/docker/dockerfiles/polkadot/polkadot_Dockerfile.README.md new file mode 100644 index 000000000000..7e89cb55f3de --- /dev/null +++ b/docker/dockerfiles/polkadot/polkadot_Dockerfile.README.md @@ -0,0 +1,7 @@ +# Polkadot official Docker image + +## [Polkadot](https://polkadot.network/) + +## [GitHub](https://github.com/paritytech/polkadot) + +## [Polkadot Wiki](https://wiki.polkadot.network/) diff --git a/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile b/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile new file mode 100644 index 000000000000..f8dc374a14aa --- /dev/null +++ b/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile @@ -0,0 +1,36 @@ +# This is the build stage for Polkadot. Here we create the binary in a temporary image. +FROM docker.io/paritytech/ci-linux:production as builder + +WORKDIR /polkadot +COPY . /polkadot + +RUN cargo build --locked --release + +# This is the 2nd stage: a very small image where we copy the Polkadot binary." 
+FROM docker.io/parity/base-bin:latest + +LABEL description="Multistage Docker image for Polkadot: a platform for web3" \ + io.parity.image.type="builder" \ + io.parity.image.authors="chevdor@gmail.com, devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.description="Polkadot: a platform for web3" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +COPY --from=builder /polkadot/target/release/polkadot /usr/local/bin + +RUN useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/polkadot --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/polkadot"] diff --git a/docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile b/docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile new file mode 100644 index 000000000000..e2c72dcfe2e9 --- /dev/null +++ b/docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile @@ -0,0 +1,53 @@ +FROM docker.io/library/ubuntu:20.04 + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG POLKADOT_VERSION +ARG POLKADOT_GPGKEY=9D4B2B6EB8F97156D19669A9FF0812D491B96798 +ARG GPG_KEYSERVER="keyserver.ubuntu.com" + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="parity/polkadot" \ + io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." 
\ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +# install tools and dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + libssl1.1 \ + ca-certificates \ + gnupg && \ + useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ +# add repo's gpg keys and install the published polkadot binary + gpg --keyserver ${GPG_KEYSERVER} --recv-keys ${POLKADOT_GPGKEY} && \ + gpg --export ${POLKADOT_GPGKEY} > /usr/share/keyrings/parity.gpg && \ + echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list && \ + apt-get update && \ + apt-get install -y --no-install-recommends polkadot=${POLKADOT_VERSION#?} && \ +# apt cleanup + apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* ; \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot + +USER polkadot + +# check if executable works in this container +RUN /usr/bin/polkadot --version +RUN /usr/bin/polkadot-execute-worker --version +RUN /usr/bin/polkadot-prepare-worker --version + +EXPOSE 30333 9933 9944 +VOLUME ["/polkadot"] + +ENTRYPOINT ["/usr/bin/polkadot"] diff --git a/docker/polkadot_injected_debug.Dockerfile b/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile similarity index 94% rename from docker/polkadot_injected_debug.Dockerfile rename to docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile index f7f764d335a2..80ce82589873 100644 --- a/docker/polkadot_injected_debug.Dockerfile +++ b/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile @@ -9,7 +9,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="${IMAGE_NAME}" \ io.parity.image.description="Polkadot: a platform for web3" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/polkadot/" diff --git a/docker/polkadot_injected_release.Dockerfile b/docker/dockerfiles/polkadot/polkadot_injected_release.Dockerfile similarity index 95% rename from docker/polkadot_injected_release.Dockerfile rename to docker/dockerfiles/polkadot/polkadot_injected_release.Dockerfile index 87ae7ac27dc0..c13f2db982a1 100644 --- a/docker/polkadot_injected_release.Dockerfile +++ b/docker/dockerfiles/polkadot/polkadot_injected_release.Dockerfile @@ -11,7 +11,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="parity/polkadot" \ io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." 
\ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/docker/dockerfiles/polkadot/polkadot_injected_release.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/polkadot/" diff --git a/docker/staking-miner/staking-miner_builder.Dockerfile b/docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile similarity index 100% rename from docker/staking-miner/staking-miner_builder.Dockerfile rename to docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile diff --git a/docker/staking-miner/staking-miner_injected.Dockerfile b/docker/dockerfiles/staking-miner/staking-miner_injected.Dockerfile similarity index 100% rename from docker/staking-miner/staking-miner_injected.Dockerfile rename to docker/dockerfiles/staking-miner/staking-miner_injected.Dockerfile diff --git a/docker/substrate_injected.Dockerfile b/docker/dockerfiles/substrate_injected.Dockerfile similarity index 100% rename from docker/substrate_injected.Dockerfile rename to docker/dockerfiles/substrate_injected.Dockerfile diff --git a/docker/test-parachain-collator.dockerfile b/docker/dockerfiles/test-parachain-collator.dockerfile similarity index 96% rename from docker/test-parachain-collator.dockerfile rename to docker/dockerfiles/test-parachain-collator.dockerfile index 9c2d8fbe5818..0d56949152e2 100644 --- a/docker/test-parachain-collator.dockerfile +++ b/docker/dockerfiles/test-parachain-collator.dockerfile @@ -1,4 +1,4 @@ -# This file is sourced from https://github.com/paritytech/polkadot/blob/master/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile +# This file is sourced from https://github.com/paritytech/polkadot/blob/master/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile FROM docker.io/paritytech/ci-linux:production as builder WORKDIR /cumulus diff --git a/docker/test-parachain_injected.Dockerfile b/docker/dockerfiles/test-parachain_injected.Dockerfile similarity index 95% rename from docker/test-parachain_injected.Dockerfile rename to docker/dockerfiles/test-parachain_injected.Dockerfile index 0b345e16e4af..e5d0df7aad67 100644 --- a/docker/test-parachain_injected.Dockerfile +++ b/docker/dockerfiles/test-parachain_injected.Dockerfile @@ -9,7 +9,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="${IMAGE_NAME}" \ io.parity.image.description="Test parachain for Zombienet" \ - io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/docker/test-parachain_injected.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/cumulus/blob/${VCS_REF}/docker/dockerfiles/test-parachain_injected.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/cumulus/" diff --git a/docker/injected.Dockerfile b/docker/injected.Dockerfile deleted file mode 100644 index 93d0561ca877..000000000000 --- a/docker/injected.Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - 
io.parity.image.description="Cumulus, the Polkadot collator." \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/docker/Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/cumulus/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates \ - curl && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user and link ~/.local/share/polkadot to /data - useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ - mkdir -p /data /polkadot/.local/share && \ - chown -R polkadot:polkadot /data && \ - ln -s /data /polkadot/.local/share/polkadot && \ - mkdir -p /specs - -# add polkadot-parachain binary to the docker image -COPY ./target/release/polkadot-parachain /usr/local/bin -COPY ./target/release/polkadot-parachain.asc /usr/local/bin -COPY ./target/release/polkadot-parachain.sha256 /usr/local/bin -COPY ./parachains/chain-specs/*.json /specs/ - -USER polkadot - -# check if executable works in this container -RUN /usr/local/bin/polkadot-parachain --version - -EXPOSE 30333 9933 9944 -VOLUME ["/polkadot"] - -ENTRYPOINT ["/usr/local/bin/polkadot-parachain"] diff --git a/docker/scripts/adder-collator/build-injected.sh b/docker/scripts/adder-collator/build-injected.sh new file mode 100755 index 000000000000..3a2d49741374 --- /dev/null +++ b/docker/scripts/adder-collator/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=adder-collator,undying-collator +export ARTIFACTS_FOLDER=$1 + +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/adder-collator/test-build.sh b/docker/scripts/adder-collator/test-build.sh new file mode 100755 index 000000000000..171e0309f807 --- /dev/null +++ b/docker/scripts/adder-collator/test-build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# TODO: Switch to /bin/bash when the image is built from parity/base-bin + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /usr/bin/bash \ + paritypr/colander:master -c \ + 'cp "$(which adder-collator)" /export' + +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /usr/bin/bash \ + paritypr/colander:master -c \ + 'cp "$(which undying-collator)" /export' + +./build-injected.sh $TMP diff --git a/docker/scripts/build-injected.sh b/docker/scripts/build-injected.sh new file mode 100755 index 000000000000..f415cf43c0ee --- /dev/null +++ b/docker/scripts/build-injected.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +#set -e + +# This script allows building a Container Image from a Linux +# binary that is injected into a base-image. 
+ +ENGINE=${ENGINE:-podman} + +if [ "$ENGINE" == "podman" ]; then + PODMAN_FLAGS="--format docker" +else + PODMAN_FLAGS="" +fi + +CONTEXT=$(mktemp -d) +REGISTRY=${REGISTRY:-docker.io} + +# The following line ensure we know the project root +PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)} +DOCKERFILE=${DOCKERFILE:-docker/dockerfiles/binary_injected.Dockerfile} +VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)") + +#n The following VAR have default that can be overriden +DOCKER_OWNER=${DOCKER_OWNER:-parity} + +# We may get 1..n binaries, comma separated +BINARY=${BINARY:-polkadot} +IFS=',' read -r -a BINARIES <<< "$BINARY" + +VERSION=${VERSION:-$VERSION_TOML} +ARTIFACTS_FOLDER=${ARTIFACTS_FOLDER:-.} + +IMAGE=${IMAGE:-${REGISTRY}/${DOCKER_OWNER}/${BINARIES[0]}} +DESCRIPTION_DEFAULT="Injected Container image built for ${BINARY}" +DESCRIPTION=${DESCRIPTION:-$DESCRIPTION_DEFAULT} + +VCS_REF=${VCS_REF:-01234567} + +# Build the image +echo "Using engine: $ENGINE" +echo "Using Dockerfile: $DOCKERFILE" +echo "Using context: $CONTEXT" +echo "Building ${IMAGE}:latest container image for ${BINARY} v${VERSION} from ${ARTIFACTS_FOLDER} hang on!" +echo "ARTIFACTS_FOLDER=$ARTIFACTS_FOLDER" +echo "CONTEXT=$CONTEXT" + +# We need all binaries and resources available in the Container build "CONTEXT" +mkdir -p $CONTEXT/bin +for bin in "${BINARIES[@]}" +do + echo "Copying $ARTIFACTS_FOLDER/$bin to context: $CONTEXT/bin" + ls -al "$ARTIFACTS_FOLDER/$bin" + cp -r "$ARTIFACTS_FOLDER/$bin" "$CONTEXT/bin" +done + +cp "$PROJECT_ROOT/docker/scripts/entrypoint.sh" "$CONTEXT" + +if [[ "$BINARY" == "polkadot-parachain" ]]; then + mkdir -p "$CONTEXT/specs" + echo "Copying parachains chain-specs from $ARTIFACTS_FOLDER/specs to context: $CONTEXT/specs" + ls -al "$ARTIFACTS_FOLDER/specs" + cp -r "$ARTIFACTS_FOLDER/specs" "$CONTEXT/specs" +fi + +echo "Building image: ${IMAGE}" + +TAGS=${TAGS[@]:-latest} +IFS=',' read -r -a TAG_ARRAY <<< "$TAGS" +TAG_ARGS=" " + +echo "The image ${IMAGE} will be tagged with ${TAG_ARRAY[*]}" +for tag in "${TAG_ARRAY[@]}"; do + TAG_ARGS+="--tag ${IMAGE}:${tag} " +done + +echo "$TAG_ARGS" + +# time \ +$ENGINE build \ + ${PODMAN_FLAGS} \ + --build-arg VCS_REF="${VCS_REF}" \ + --build-arg BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ') \ + --build-arg IMAGE_NAME="${IMAGE}" \ + --build-arg BINARY="${BINARY}" \ + --build-arg ARTIFACTS_FOLDER="${ARTIFACTS_FOLDER}" \ + --build-arg DESCRIPTION="${DESCRIPTION}" \ + ${TAG_ARGS} \ + -f "${PROJECT_ROOT}/${DOCKERFILE}" \ + ${CONTEXT} + +echo "Your Container image for ${IMAGE} is ready" +$ENGINE images + +if [[ -z "${SKIP_IMAGE_VALIDATION}" ]]; then + echo "Check the image ${IMAGE}:${TAG_ARRAY[0]}" + $ENGINE run --rm -i "${IMAGE}:${TAG_ARRAY[0]}" --version + + echo "Query binaries" + $ENGINE run --rm -i --entrypoint /bin/bash "${IMAGE}:${TAG_ARRAY[0]}" -c "echo BINARY: ${BINARY}" +fi diff --git a/docker/scripts/entrypoint.sh b/docker/scripts/entrypoint.sh new file mode 100755 index 000000000000..eaa815faf6a4 --- /dev/null +++ b/docker/scripts/entrypoint.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Sanity check +if [ -z "$BINARY" ] +then + echo "BINARY ENV not defined, this should never be the case. Aborting..." 
+ exit 1 +fi + +# If the user built the image with multiple binaries, +# we consider the first one to be the canonical one +# To start with another binary, the user can either: +# - use the --entrypoint option +# - pass the ENV BINARY with a single binary +IFS=',' read -r -a BINARIES <<< "$BINARY" +BIN0=${BINARIES[0]} +echo "Starting binary $BIN0" +$BIN0 $@ diff --git a/docker/scripts/malus/build-injected.sh b/docker/scripts/malus/build-injected.sh new file mode 100755 index 000000000000..83e30e178500 --- /dev/null +++ b/docker/scripts/malus/build-injected.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=malus,polkadot-execute-worker,polkadot-prepare-worker +export ARTIFACTS_FOLDER=$1 +# export TAGS=... + +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/malus/test-build.sh b/docker/scripts/malus/test-build.sh new file mode 100755 index 000000000000..3114e9e2adf1 --- /dev/null +++ b/docker/scripts/malus/test-build.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +export TAGS=latest,beta,7777,1.0.2-rc23 + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + paritypr/malus:7217 -c \ + 'cp "$(which malus)" /export' + +echo "Checking binaries we got:" +ls -al $TMP + +./build-injected.sh $TMP diff --git a/docker/scripts/polkadot-parachain/build-injected.sh b/docker/scripts/polkadot-parachain/build-injected.sh new file mode 100755 index 000000000000..f5c86a035177 --- /dev/null +++ b/docker/scripts/polkadot-parachain/build-injected.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=polkadot-parachain +export ARTIFACTS_FOLDER=$1 +export DOCKERFILE="docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile" +# export TAGS=... 
+ +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/polkadot-parachain/test-build.sh b/docker/scripts/polkadot-parachain/test-build.sh new file mode 100755 index 000000000000..1dc53bd0d0b7 --- /dev/null +++ b/docker/scripts/polkadot-parachain/test-build.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +export TAGS=latest,beta,7777,1.0.2-rc23 + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/polkadot-parachain:7217 -c \ + 'cp "$(which malus)" /export' + +echo "Checking binaries we got:" +ls -al $TMP + +./build-injected.sh $TMP diff --git a/cumulus/docker/scripts/build-injected-image.sh b/docker/scripts/polkadot-parachain_build-injected-image.sh similarity index 70% rename from cumulus/docker/scripts/build-injected-image.sh rename to docker/scripts/polkadot-parachain_build-injected-image.sh index b8bb0dd7dd2c..bb6909dd3b7b 100755 --- a/cumulus/docker/scripts/build-injected-image.sh +++ b/docker/scripts/polkadot-parachain_build-injected-image.sh @@ -6,5 +6,5 @@ IMAGE_NAME=${IMAGE_NAME:-polkadot-parachain} docker build --no-cache \ --build-arg IMAGE_NAME=$IMAGE_NAME \ -t $OWNER/$IMAGE_NAME \ - -f ./docker/injected.Dockerfile \ + -f ./docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile \ . && docker images diff --git a/docker/scripts/polkadot/build-injected.sh b/docker/scripts/polkadot/build-injected.sh new file mode 100755 index 000000000000..7cc6db43a54a --- /dev/null +++ b/docker/scripts/polkadot/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=polkadot,polkadot-execute-worker,polkadot-prepare-worker +export ARTIFACTS_FOLDER=$1 + +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/polkadot/test-build.sh b/docker/scripts/polkadot/test-build.sh new file mode 100755 index 000000000000..d2d904561cb5 --- /dev/null +++ b/docker/scripts/polkadot/test-build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# You need to build an injected image first + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/polkadot -c \ + 'cp "$(which polkadot)" /export' + +echo "Checking binaries we got:" +tree $TMP + +./build-injected.sh $TMP diff --git a/docker/scripts/staking-miner/README.md b/docker/scripts/staking-miner/README.md new file mode 100644 index 000000000000..3610e1130316 --- /dev/null +++ b/docker/scripts/staking-miner/README.md @@ -0,0 +1,37 @@ +# staking-miner container image + +## Build using the Builder + +``` +./build.sh +``` + +## Build the injected Image + +You first need a valid Linux binary to inject. Let's assume this binary is located in `BIN_FOLDER`. + +``` +./build-injected.sh "$BIN_FOLDER" +``` + +## Test + +Here is how to test the image. We can generate a valid seed but the staking-miner will quickly notice that our +account is not funded and "does not exist". 
+ +You may pass any ENV supported by the binary and must provide at least a few such as `SEED` and `URI`: +``` +ENV SEED="" +ENV URI="wss://rpc.polkadot.io:443" +ENV RUST_LOG="info" +``` + +``` +export SEED=$(subkey generate -n polkadot --output-type json | jq -r .secretSeed) +podman run --rm -it \ + -e URI="wss://rpc.polkadot.io:443" \ + -e RUST_LOG="info" \ + -e SEED \ + localhost/parity/staking-miner \ + dry-run seq-phragmen +``` diff --git a/docker/scripts/staking-miner/build-injected.sh b/docker/scripts/staking-miner/build-injected.sh new file mode 100755 index 000000000000..efe323b5fed8 --- /dev/null +++ b/docker/scripts/staking-miner/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_staking-miner_binary +# This script replace the former dedicated staking-miner "injected" Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=staking-miner +export ARTIFACTS_FOLDER=$1 + +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/staking-miner/build.sh b/docker/scripts/staking-miner/build.sh new file mode 100755 index 000000000000..c2b6ab77e531 --- /dev/null +++ b/docker/scripts/staking-miner/build.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_staking-miner_binary +# This script replace the former dedicated staking-miner "injected" Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` +ENGINE=podman + +echo "Building the staking-miner using the Builder image" +echo "PROJECT_ROOT=$PROJECT_ROOT" +$ENGINE build -t staking-miner -f "${PROJECT_ROOT}/docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile" "$PROJECT_ROOT" diff --git a/docker/scripts/staking-miner/staking-miner_Dockerfile.README.md b/docker/scripts/staking-miner/staking-miner_Dockerfile.README.md new file mode 100644 index 000000000000..ce424c42f479 --- /dev/null +++ b/docker/scripts/staking-miner/staking-miner_Dockerfile.README.md @@ -0,0 +1,3 @@ +# Staking-miner Docker image + +## [GitHub](https://github.com/paritytech/polkadot/tree/master/utils/staking-miner) diff --git a/docker/scripts/staking-miner/staking-miner_builder.Dockerfile b/docker/scripts/staking-miner/staking-miner_builder.Dockerfile new file mode 100644 index 000000000000..0ae77f36c79d --- /dev/null +++ b/docker/scripts/staking-miner/staking-miner_builder.Dockerfile @@ -0,0 +1,43 @@ +FROM paritytech/ci-linux:production as builder + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME="staking-miner" +ARG PROFILE=production + +LABEL description="This is the build stage. Here we create the binary." + +WORKDIR /app +COPY . /app +RUN cargo build --locked --profile $PROFILE --package staking-miner + +# ===== SECOND STAGE ====== + +FROM docker.io/parity/base-bin:latest +LABEL description="This is the 2nd stage: a very small image where we copy the binary." 
+LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_builder.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +ARG PROFILE=release +COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin + +# show backtraces +ENV RUST_BACKTRACE 1 + +USER parity + +ENV SEED="" +ENV URI="wss://rpc.polkadot.io" +ENV RUST_LOG="info" + +# check if the binary works in this container +RUN /usr/local/bin/staking-miner --version + +ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/docker/scripts/staking-miner/test-build.sh b/docker/scripts/staking-miner/test-build.sh new file mode 100755 index 000000000000..0ce74e2df296 --- /dev/null +++ b/docker/scripts/staking-miner/test-build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# You need to build an injected image first + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/staking-miner -c \ + 'cp "$(which staking-miner)" /export' + +echo "Checking binaries we got:" +tree $TMP + +./build-injected.sh $TMP diff --git a/cumulus/docs/container.md b/docs/container.md similarity index 96% rename from cumulus/docs/container.md rename to docs/container.md index ef7c52a44fab..afd3b27957c2 100644 --- a/cumulus/docs/container.md +++ b/docs/container.md @@ -52,7 +52,7 @@ anyone to get a working container image without requiring any of the Rust toolch ```bash docker build \ --tag $OWNER/$IMAGE_NAME \ - --file ./docker/polkadot-parachain_builder.Containerfile . + --file ./docker/dockerfiles/polkadot-parachain/polkadot-parachain_builder.Containerfile . ``` You may then run your new container: diff --git a/polkadot/doc/docker.md b/docs/docker.md similarity index 98% rename from polkadot/doc/docker.md rename to docs/docker.md index dc679908ec61..53619ca1a971 100644 --- a/polkadot/doc/docker.md +++ b/docs/docker.md @@ -144,7 +144,7 @@ There are 3 options to build a Polkadot container image: To get up and running with the smallest footprint on your system, you may use an existing Polkadot Container image. You may also build a Polkadot container image yourself (it takes a while...) using the container specs -`scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile`. +`docker/dockerfiles/polkadot/polkadot_builder.Dockerfile`. ### Debian injected diff --git a/polkadot/.github/workflows/release-40_publish-rc-image.yml b/polkadot/.github/workflows/release-40_publish-rc-image.yml deleted file mode 100644 index 3d91c5b8c682..000000000000 --- a/polkadot/.github/workflows/release-40_publish-rc-image.yml +++ /dev/null @@ -1,132 +0,0 @@ -name: Release - Publish RC Container image -# see https://github.com/paritytech/release-engineering/issues/97#issuecomment-1651372277 - -on: - workflow_dispatch: - inputs: - release_id: - description: | - Release ID. 
- You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: string - registry: - description: "Container registry" - required: true - type: string - default: docker.io - owner: - description: Owner of the container image repo - required: true - type: string - default: parity - -env: - RELEASE_ID: ${{ inputs.release_id }} - ENGINE: docker - REGISTRY: ${{ inputs.registry }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} - REPO: ${{ github.repository }} - -jobs: - fetch-artifacts: - runs-on: ubuntu-latest - - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - - name: Fetch all artifacts - run: | - . ./scripts/ci/common/lib.sh - fetch_release_artifacts - - - name: Cache the artifacts - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - key: artifacts-${{ github.sha }} - path: | - ./release-artifacts/**/* - - build-container: - runs-on: ubuntu-latest - needs: fetch-artifacts - - strategy: - matrix: - binary: ["polkadot", "staking-miner"] - - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - - name: Get artifacts from cache - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - key: artifacts-${{ github.sha }} - fail-on-cache-miss: true - path: | - ./release-artifacts/**/* - - - name: Check sha256 ${{ matrix.binary }} - working-directory: ./release-artifacts - run: | - . ../scripts/ci/common/lib.sh - - echo "Checking binary ${{ matrix.binary }}" - check_sha256 ${{ matrix.binary }} && echo "OK" || echo "ERR" - - - name: Check GPG ${{ matrix.binary }} - working-directory: ./release-artifacts - run: | - . 
../scripts/ci/common/lib.sh - import_gpg_keys - check_gpg ${{ matrix.binary }} - - - name: Fetch commit and tag - id: fetch_refs - run: | - release=release-${{ inputs.release_id }} && \ - echo "release=${release}" >> $GITHUB_OUTPUT - - commit=$(git rev-parse --short HEAD) && \ - echo "commit=${commit}" >> $GITHUB_OUTPUT - - tag=$(git name-rev --tags --name-only $(git rev-parse HEAD)) && \ - [ "${tag}" != "undefined" ] && echo "tag=${tag}" >> $GITHUB_OUTPUT || \ - echo "No tag, doing without" - - - name: Build Injected Container image for ${{ matrix.binary }} - env: - BIN_FOLDER: ./release-artifacts - BINARY: ${{ matrix.binary }} - TAGS: ${{join(steps.fetch_refs.outputs.*, ',')}} - run: | - echo "Building container for ${{ matrix.binary }}" - ./scripts/ci/dockerfiles/build-injected.sh - - - name: Login to Dockerhub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Push Container image for ${{ matrix.binary }} - id: docker_push - env: - BINARY: ${{ matrix.binary }} - run: | - $ENGINE images | grep ${BINARY} - $ENGINE push --all-tags ${REGISTRY}/${DOCKER_OWNER}/${BINARY} - - - name: Check version for the published image for ${{ matrix.binary }} - env: - BINARY: ${{ matrix.binary }} - RELEASE_TAG: ${{ steps.fetch_refs.outputs.release }} - run: | - echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}" - $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version diff --git a/polkadot/.github/workflows/release-51_publish-docker-manual.yml b/polkadot/.github/workflows/release-51_publish-docker-manual.yml deleted file mode 100644 index 919769f8700d..000000000000 --- a/polkadot/.github/workflows/release-51_publish-docker-manual.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Release - Publish Docker image (manual dispatch) - -on: - workflow_dispatch: - inputs: - version: - description: version to build/release - default: v0.9.18 - required: true - date: - description: release date of version - default: "2022-02-23T19:11:58Z" - required: true - -jobs: - main: - runs-on: ubuntu-latest - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0 - - name: Cache Docker layers - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - name: Login to Dockerhub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and push - id: docker_build - uses: docker/build-push-action@v4 - with: - push: true - file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile - tags: | - parity/polkadot:latest - parity/polkadot:${{ github.event.inputs.version }} - build-args: | - POLKADOT_VERSION=${{ github.event.inputs.version }} - VCS_REF=${{ github.ref }} - BUILD_DATE=${{ github.event.inputs.date }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/polkadot/utils/staking-miner/README.md b/polkadot/utils/staking-miner/README.md index 8fec746e6eea..90a00eeac089 100644 --- a/polkadot/utils/staking-miner/README.md +++ b/polkadot/utils/staking-miner/README.md @@ -51,7 +51,7 @@ to be installed. 
The trade-off however is that it takes a little longer to build tasks. You may build the multi-stage image the root of the Polkadot repository with: ``` TODO: UPDATE THAT -docker build -t staking-miner -f scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile . +docker build -t staking-miner -f docker/dockerfiles/staking-miner/staking-miner_builder.Dockerfile . ``` ### Running From 1e2a2f0c69f3d8179a45363ddad18d26b5531183 Mon Sep 17 00:00:00 2001 From: eskimor Date: Wed, 6 Sep 2023 18:19:21 +0200 Subject: [PATCH 25/28] Fix nothing scheduled on session boundary (#1403) * Fix scheduled state at session boundaries. * Cleanup + better docs. * More cleanup and fixes. * Remove 12s hack. * Add dep. * Make clippy happy --------- Co-authored-by: eskimor --- Cargo.lock | 1 + polkadot/primitives/src/lib.rs | 4 +- polkadot/primitives/src/v5/mod.rs | 54 ---- polkadot/runtime/parachains/Cargo.toml | 1 + polkadot/runtime/parachains/src/assigner.rs | 4 +- .../parachains/src/assigner_on_demand/mod.rs | 5 +- .../src/assigner_on_demand/tests.rs | 6 +- .../parachains/src/assigner_parachains.rs | 6 +- polkadot/runtime/parachains/src/builder.rs | 14 +- .../runtime/parachains/src/configuration.rs | 17 +- .../runtime/parachains/src/inclusion/mod.rs | 208 ++++++------- .../runtime/parachains/src/inclusion/tests.rs | 129 +++----- .../parachains/src/paras_inherent/mod.rs | 32 +- .../parachains/src/paras_inherent/tests.rs | 22 +- .../parachains/src/runtime_api_impl/v5.rs | 66 ++-- polkadot/runtime/parachains/src/scheduler.rs | 281 +++++++++++------- .../parachains/src/scheduler/common.rs | 50 +--- .../parachains/src/scheduler/migration.rs | 1 - .../runtime/parachains/src/scheduler/tests.rs | 237 ++++++--------- 19 files changed, 492 insertions(+), 646 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d08c175bde5..2fff57879891 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12792,6 +12792,7 @@ dependencies = [ "pallet-timestamp", "pallet-vesting", "parity-scale-codec", + "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-primitives-test-helpers", diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 729908cc12ba..9121b3790858 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -41,8 +41,8 @@ pub use v5::{ BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature, - CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreOccupied, CoreState, - DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, + CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState, + DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v5/mod.rs index eed4cc2b36ba..30782f95611f 100644 --- a/polkadot/primitives/src/v5/mod.rs +++ b/polkadot/primitives/src/v5/mod.rs @@ -830,60 +830,6 @@ pub struct ParathreadEntry { pub retries: u32, } -/// An 
assignment for a parachain scheduled to be backed and included in a relay chain block. -#[derive(Clone, Encode, Decode, PartialEq, TypeInfo, RuntimeDebug)] -pub struct Assignment { - /// Assignment's ParaId - pub para_id: Id, -} - -impl Assignment { - /// Create a new `Assignment`. - pub fn new(para_id: Id) -> Self { - Self { para_id } - } -} - -/// An entry tracking a paras -#[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)] -pub struct ParasEntry { - /// The `Assignment` - pub assignment: Assignment, - /// The number of times the entry has timed out in availability. - pub availability_timeouts: u32, - /// The block height where this entry becomes invalid. - pub ttl: N, -} - -impl ParasEntry { - /// Return `Id` from the underlying `Assignment`. - pub fn para_id(&self) -> Id { - self.assignment.para_id - } - - /// Create a new `ParasEntry`. - pub fn new(assignment: Assignment, now: N) -> Self { - ParasEntry { assignment, availability_timeouts: 0, ttl: now } - } -} - -/// What is occupying a specific availability core. -#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub enum CoreOccupied { - /// The core is not occupied. - Free, - /// A paras. - Paras(ParasEntry), -} - -impl CoreOccupied { - /// Is core free? - pub fn is_free(&self) -> bool { - matches!(self, Self::Free) - } -} - /// A helper data-type for tracking validator-group rotations. #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 0e2f6aa1aa24..77eba0bc10ba 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -50,6 +50,7 @@ rand_chacha = { version = "0.3.1", default-features = false } static_assertions = { version = "1.1.0", optional = true } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } polkadot-runtime-metrics = { path = "../metrics", default-features = false} +polkadot-core-primitives = { path = "../../core-primitives", default-features = false } [dev-dependencies] futures = "0.3.21" diff --git a/polkadot/runtime/parachains/src/assigner.rs b/polkadot/runtime/parachains/src/assigner.rs index 55434da11f30..b21e857a4713 100644 --- a/polkadot/runtime/parachains/src/assigner.rs +++ b/polkadot/runtime/parachains/src/assigner.rs @@ -17,11 +17,11 @@ //! The Polkadot multiplexing assignment provider. //! Provides blockspace assignments for both bulk and on demand parachains. 
use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; +use primitives::{CoreIndex, Id as ParaId}; use crate::{ configuration, paras, - scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, }; pub use pallet::*; diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 0c9813d144f3..75c29bd6fbe4 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -34,7 +34,7 @@ mod tests; use crate::{ configuration, paras, - scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, }; use frame_support::{ @@ -46,7 +46,7 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; +use primitives::{CoreIndex, Id as ParaId}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, @@ -606,7 +606,6 @@ impl AssignmentProvider> for Pallet { fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { let config = >::config(); AssignmentProviderConfig { - availability_period: config.paras_availability_period, max_availability_timeouts: config.on_demand_retries, ttl: config.on_demand_ttl, } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 8041179cd90c..fe9a4e52bd07 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -24,13 +24,11 @@ use crate::{ System, Test, }, paras::{ParaGenesisArgs, ParaKind}, + scheduler::common::Assignment, }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; -use primitives::{ - v5::{Assignment, ValidationCode}, - BlockNumber, SessionIndex, -}; +use primitives::{v5::ValidationCode, BlockNumber, SessionIndex}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index 9a6b970597d5..d605d8660515 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -19,11 +19,11 @@ use crate::{ configuration, paras, - scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, }; use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; -use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; +use primitives::{CoreIndex, Id as ParaId}; #[frame_support::pallet] pub mod pallet { @@ -57,9 +57,7 @@ impl AssignmentProvider> for Pallet { fn push_assignment_for_core(_: CoreIndex, _: Assignment) {} fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { - let config = >::config(); AssignmentProviderConfig { - availability_period: config.paras_availability_period, // The next assignment already goes to the same [`ParaId`], no timeout tracking needed. 
max_availability_timeouts: 0, // The next assignment already goes to the same [`ParaId`], this can be any number diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 4921af5bedda..dced24df0aec 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -18,18 +18,20 @@ use crate::{ configuration, inclusion, initializer, paras, paras::ParaKind, paras_inherent, - scheduler::{self, common::AssignmentProviderConfig}, + scheduler::{ + self, + common::{Assignment, AssignmentProviderConfig}, + CoreOccupied, ParasEntry, + }, session_info, shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, - v5::{Assignment, ParasEntry}, - AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, - CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, - CoreIndex, CoreOccupied, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, + collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments, + CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, + CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation, diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 0c66bb5bdf96..33039cd08ca4 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -190,11 +190,20 @@ pub struct HostConfiguration { /// /// Must be non-zero. pub group_rotation_frequency: BlockNumber, - /// The availability period, in blocks. This is the amount of blocks - /// after inclusion that validators have to make the block available and signal its - /// availability to the chain. + /// The minimum availability period, in blocks. /// - /// Must be at least 1. + /// This is the minimum amount of blocks after a core became occupied that validators have time + /// to make the block available. + /// + /// This value only has effect on group rotations. If backers backed something at the end of + /// their rotation, the occupied core affects the backing group that comes afterwards. We limit + /// the effect one backing group can have on the next to `paras_availability_period` blocks. + /// + /// Within a group rotation there is no timeout as backers are only affecting themselves. + /// + /// Must be at least 1. With a value of 1, the previous group will not be able to negatively + /// affect the following group at the expense of a tight availability timeline at group + /// rotation boundaries. pub paras_availability_period: BlockNumber, /// The amount of blocks ahead to schedule paras. 
pub scheduling_lookahead: u32, diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index a9ee2d1b9612..bb16c804150d 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -22,7 +22,7 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras, - scheduler::{self, common::CoreAssignment}, + scheduler::{self, AvailabilityTimeoutStatus}, shared::{self, AllowedRelayParentsTracker}, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; @@ -46,7 +46,10 @@ use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; #[cfg(feature = "std")] use sp_std::fmt; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + prelude::*, +}; pub use pallet::*; @@ -597,7 +600,7 @@ impl Pallet { pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, candidates: Vec>, - scheduled: Vec>>, + scheduled: &BTreeMap, group_validators: GV, ) -> Result, DispatchError> where @@ -620,20 +623,18 @@ impl Pallet { // Do all checks before writing storage. let core_indices_and_backers = { - let mut skip = 0; let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); let mut last_core = None; - let mut check_assignment_in_order = - |assignment: &CoreAssignment>| -> DispatchResult { - ensure!( - last_core.map_or(true, |core| assignment.core > core), - Error::::ScheduledOutOfOrder, - ); + let mut check_assignment_in_order = |core_idx| -> DispatchResult { + ensure!( + last_core.map_or(true, |core| core_idx > core), + Error::::ScheduledOutOfOrder, + ); - last_core = Some(assignment.core); - Ok(()) - }; + last_core = Some(core_idx); + Ok(()) + }; // We combine an outer loop over candidates with an inner loop over the scheduled, // where each iteration of the outer loop picks up at the position @@ -645,9 +646,7 @@ impl Pallet { // // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. - 'next_backed_candidate: for (candidate_idx, backed_candidate) in - candidates.iter().enumerate() - { + for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { let relay_parent_hash = backed_candidate.descriptor().relay_parent; let para_id = backed_candidate.descriptor().para_id; @@ -681,108 +680,89 @@ impl Pallet { let para_id = backed_candidate.descriptor().para_id; let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - for (i, core_assignment) in scheduled[skip..].iter().enumerate() { - check_assignment_in_order(core_assignment)?; + let core_idx = *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)?; + check_assignment_in_order(core_idx)?; + ensure!( + >::get(¶_id).is_none() && + >::get(¶_id).is_none(), + Error::::CandidateScheduledBeforeParaFree, + ); - if para_id == core_assignment.paras_entry.para_id() { - ensure!( - >::get(¶_id).is_none() && - >::get(¶_id).is_none(), - Error::::CandidateScheduledBeforeParaFree, - ); + // The candidate based upon relay parent `N` should be backed by a group + // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` + // will always land in the current session. 
+ let group_idx = >::group_assigned_to_core( + core_idx, + relay_parent_number + One::one(), + ) + .ok_or_else(|| { + log::warn!( + target: LOG_TARGET, + "Failed to compute group index for candidate {}", + candidate_idx + ); + Error::::InvalidAssignment + })?; + let group_vals = + group_validators(group_idx).ok_or_else(|| Error::::InvalidGroupIndex)?; - // account for already skipped, and then skip this one. - skip = i + skip + 1; - - // The candidate based upon relay parent `N` should be backed by a group - // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` - // will always land in the current session. - let group_idx = >::group_assigned_to_core( - core_assignment.core, - relay_parent_number + One::one(), - ) - .ok_or_else(|| { - log::warn!( - target: LOG_TARGET, - "Failed to compute group index for candidate {}", - candidate_idx - ); - Error::::InvalidAssignment - })?; - let group_vals = group_validators(group_idx) - .ok_or_else(|| Error::::InvalidGroupIndex)?; - - // check the signatures in the backing and that it is a majority. - { - let maybe_amount_validated = primitives::check_candidate_backing( - &backed_candidate, - &signing_context, - group_vals.len(), - |intra_group_vi| { - group_vals - .get(intra_group_vi) - .and_then(|vi| validators.get(vi.0 as usize)) - .map(|v| v.clone()) - }, - ); - - match maybe_amount_validated { - Ok(amount_validated) => ensure!( - amount_validated >= - effective_minimum_backing_votes( - group_vals.len(), - minimum_backing_votes - ), - Error::::InsufficientBacking, + // check the signatures in the backing and that it is a majority. + { + let maybe_amount_validated = primitives::check_candidate_backing( + &backed_candidate, + &signing_context, + group_vals.len(), + |intra_group_vi| { + group_vals + .get(intra_group_vi) + .and_then(|vi| validators.get(vi.0 as usize)) + .map(|v| v.clone()) + }, + ); + + match maybe_amount_validated { + Ok(amount_validated) => ensure!( + amount_validated >= + effective_minimum_backing_votes( + group_vals.len(), + minimum_backing_votes ), - Err(()) => { - Err(Error::::InvalidBacking)?; - }, - } - - let mut backer_idx_and_attestation = - Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - backed_candidate.validator_indices.count_ones(), - ); - let candidate_receipt = backed_candidate.receipt(); - - for ((bit_idx, _), attestation) in backed_candidate - .validator_indices - .iter() - .enumerate() - .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) - { - let val_idx = group_vals - .get(bit_idx) - .expect("this query succeeded above; qed"); - backer_idx_and_attestation.push((*val_idx, attestation)); - - backers.set(val_idx.0 as _, true); - } - candidate_receipt_with_backing_validator_indices - .push((candidate_receipt, backer_idx_and_attestation)); - } - - core_indices_and_backers.push(( - (core_assignment.core, core_assignment.paras_entry.para_id()), - backers, - group_idx, - relay_parent_number, - )); - continue 'next_backed_candidate + Error::::InsufficientBacking, + ), + Err(()) => { + Err(Error::::InvalidBacking)?; + }, } - } - // end of loop reached means that the candidate didn't appear in the non-traversed - // section of the `scheduled` slice. either it was not scheduled or didn't appear in - // `candidates` in the correct order. 
- ensure!(false, Error::::UnscheduledCandidate); - } + let mut backer_idx_and_attestation = + Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( + backed_candidate.validator_indices.count_ones(), + ); + let candidate_receipt = backed_candidate.receipt(); + + for ((bit_idx, _), attestation) in backed_candidate + .validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes.iter().cloned()) + { + let val_idx = + group_vals.get(bit_idx).expect("this query succeeded above; qed"); + backer_idx_and_attestation.push((*val_idx, attestation)); + + backers.set(val_idx.0 as _, true); + } + candidate_receipt_with_backing_validator_indices + .push((candidate_receipt, backer_idx_and_attestation)); + } - // check remainder of scheduled cores, if any. - for assignment in scheduled[skip..].iter() { - check_assignment_in_order(assignment)?; + core_indices_and_backers.push(( + (core_idx, para_id), + backers, + group_idx, + relay_parent_number, + )); } core_indices_and_backers @@ -1043,13 +1023,13 @@ impl Pallet { /// /// Returns a vector of cleaned-up core IDs. pub(crate) fn collect_pending( - pred: impl Fn(CoreIndex, BlockNumberFor) -> bool, + pred: impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus>, ) -> Vec { let mut cleaned_up_ids = Vec::new(); let mut cleaned_up_cores = Vec::new(); for (para_id, pending_record) in >::iter() { - if pred(pending_record.core, pending_record.backed_in_number) { + if pred(pending_record.backed_in_number).timed_out { cleaned_up_ids.push(para_id); cleaned_up_cores.push(pending_record.core); } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 70c5e959038a..7677108d73de 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -36,7 +36,6 @@ use frame_support::assert_noop; use keyring::Sr25519Keyring; use parity_scale_codec::DecodeAll; use primitives::{ - v5::{Assignment, ParasEntry}, BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorId, CompactStatement as Statement, Hash, SignedAvailabilityBitfield, SignedStatement, ValidationCode, ValidatorId, ValidityAttestation, PARACHAIN_KEY_TYPE_ID, @@ -380,7 +379,9 @@ fn collect_pending_cleans_up_pending() { (chain_b, ParaKind::Parachain), (thread_a, ParaKind::Parathread), ]; - new_test_ext(genesis_config(paras)).execute_with(|| { + let mut config = genesis_config(paras); + config.configuration.config.group_rotation_frequency = 3; + new_test_ext(config).execute_with(|| { let default_candidate = TestCandidateBuilder::default().build(); >::insert( chain_a, @@ -408,7 +409,7 @@ fn collect_pending_cleans_up_pending() { descriptor: default_candidate.descriptor, availability_votes: default_availability_votes(), relay_parent_number: 0, - backed_in_number: 0, + backed_in_number: 5, backers: default_backing_bitfield(), backing_group: GroupIndex::from(1), }, @@ -422,7 +423,7 @@ fn collect_pending_cleans_up_pending() { assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); - ParaInclusion::collect_pending(|core, _since| core == CoreIndex::from(0)); + ParaInclusion::collect_pending(Scheduler::availability_timeout_predicate()); assert!(>::get(&chain_a).is_none()); assert!(>::get(&chain_b).is_some()); @@ -910,23 +911,12 @@ fn candidate_checks() { ]; Scheduler::set_validator_groups(validator_groups); - let entry_ttl = 10_000; let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); - let 
chain_a_assignment = CoreAssignment { - core: CoreIndex::from(0), - paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), - }; - - let chain_b_assignment = CoreAssignment { - core: CoreIndex::from(1), - paras_entry: ParasEntry::new(Assignment::new(chain_b), entry_ttl), - }; + let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let thread_a_assignment = CoreAssignment { - core: CoreIndex::from(2), - paras_entry: ParasEntry::new(Assignment::new(thread_a), entry_ttl), - }; + let chain_b_assignment = (chain_b, CoreIndex::from(1)); + let thread_a_assignment = (thread_a, CoreIndex::from(2)); let allowed_relay_parents = default_allowed_relay_parent_tracker(); // unscheduled candidate. @@ -955,7 +945,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_b_assignment.clone()], + &[chain_b_assignment].into_iter().collect(), &group_validators, ), Error::::UnscheduledCandidate @@ -1010,10 +1000,10 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed_b, backed_a], - vec![chain_a_assignment.clone(), chain_b_assignment.clone()], + &[chain_a_assignment, chain_b_assignment].into_iter().collect(), &group_validators, ), - Error::::UnscheduledCandidate + Error::::ScheduledOutOfOrder ); } @@ -1043,7 +1033,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Error::::InsufficientBacking @@ -1100,7 +1090,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed_b, backed_a], - vec![chain_a_assignment.clone(), chain_b_assignment.clone()], + &[chain_a_assignment, chain_b_assignment].into_iter().collect(), &group_validators, ), Error::::DisallowedRelayParent @@ -1138,7 +1128,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![thread_a_assignment.clone()], + &[thread_a_assignment].into_iter().collect(), &group_validators, ), Error::::NotCollatorSigned @@ -1188,7 +1178,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Error::::CandidateScheduledBeforeParaFree @@ -1228,7 +1218,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Error::::CandidateScheduledBeforeParaFree @@ -1272,7 +1262,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Error::::PrematureCodeUpgrade @@ -1306,7 +1296,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Err(Error::::ValidationDataHashMismatch.into()), @@ -1341,7 +1331,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Error::::InvalidValidationCodeHash @@ -1376,7 +1366,7 @@ fn candidate_checks() { ParaInclusion::process_candidates( 
&allowed_relay_parents, vec![backed], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ), Error::::ParaHeadMismatch @@ -1446,21 +1436,9 @@ fn backing_works() { let allowed_relay_parents = default_allowed_relay_parent_tracker(); - let entry_ttl = 10_000; - let chain_a_assignment = CoreAssignment { - core: CoreIndex::from(0), - paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), - }; - - let chain_b_assignment = CoreAssignment { - core: CoreIndex::from(1), - paras_entry: ParasEntry::new(Assignment::new(chain_b), entry_ttl), - }; - - let thread_a_assignment = CoreAssignment { - core: CoreIndex::from(2), - paras_entry: ParasEntry::new(Assignment::new(thread_a), entry_ttl), - }; + let chain_a_assignment = (chain_a, CoreIndex::from(0)); + let chain_b_assignment = (chain_b, CoreIndex::from(1)); + let thread_a_assignment = (thread_a, CoreIndex::from(2)); let mut candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -1548,11 +1526,9 @@ fn backing_works() { } = ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - vec![ - chain_a_assignment.clone(), - chain_b_assignment.clone(), - thread_a_assignment.clone(), - ], + &[chain_a_assignment, chain_b_assignment, thread_a_assignment] + .into_iter() + .collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); @@ -1738,12 +1714,7 @@ fn can_include_candidate_with_ok_code_upgrade() { Scheduler::set_validator_groups(validator_groups); let allowed_relay_parents = default_allowed_relay_parent_tracker(); - let entry_ttl = 10_000; - let chain_a_assignment = CoreAssignment { - core: CoreIndex::from(0), - paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), - }; - + let chain_a_assignment = (chain_a, CoreIndex::from(0)); let mut candidate_a = TestCandidateBuilder { para_id: chain_a, relay_parent: System::parent_hash(), @@ -1769,7 +1740,7 @@ fn can_include_candidate_with_ok_code_upgrade() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed_a], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); @@ -1895,28 +1866,10 @@ fn check_allowed_relay_parents() { max_ancestry_len, ); - let chain_a_assignment = CoreAssignment { - core: CoreIndex::from(0), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 5, - }, - }; + let chain_a_assignment = (chain_a, CoreIndex::from(0)); - let chain_b_assignment = CoreAssignment { - core: CoreIndex::from(1), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_b }, - availability_timeouts: 0, - ttl: 5, - }, - }; - - let thread_a_assignment = CoreAssignment { - core: CoreIndex::from(2), - paras_entry: ParasEntry::new(Assignment::new(thread_a), 5), - }; + let chain_b_assignment = (chain_b, CoreIndex::from(1)); + let thread_a_assignment = (thread_a, CoreIndex::from(2)); let mut candidate_a = TestCandidateBuilder { para_id: chain_a, @@ -1998,11 +1951,9 @@ fn check_allowed_relay_parents() { ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - vec![ - chain_a_assignment.clone(), - chain_b_assignment.clone(), - thread_a_assignment.clone(), - ], + &[chain_a_assignment, chain_b_assignment, thread_a_assignment] + .into_iter() + .collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); @@ -2212,15 +2163,7 @@ fn 
para_upgrade_delay_scheduled_from_inclusion() { let allowed_relay_parents = default_allowed_relay_parent_tracker(); - let chain_a_assignment = CoreAssignment { - core: CoreIndex::from(0), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 5, - }, - }; - + let chain_a_assignment = (chain_a, CoreIndex::from(0)); let mut candidate_a = TestCandidateBuilder { para_id: chain_a, relay_parent: System::parent_hash(), @@ -2246,7 +2189,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ParaInclusion::process_candidates( &allowed_relay_parents, vec![backed_a], - vec![chain_a_assignment.clone()], + &[chain_a_assignment].into_iter().collect(), &group_validators, ) .expect("candidates scheduled, in order, and backed"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 6244f44e434b..8e918d35d5ff 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -29,10 +29,7 @@ use crate::{ initializer, metrics::METRICS, paras, - scheduler::{ - self, - common::{CoreAssignment, FreedReason}, - }, + scheduler::{self, FreedReason}, shared, ParaId, }; use bitvec::prelude::BitVec; @@ -245,8 +242,8 @@ pub mod pallet { T: Config, { // Handle timeouts for any availability core work. - let availability_pred = >::availability_timeout_predicate(); - let freed_timeout = if let Some(pred) = availability_pred { + let freed_timeout = if >::availability_timeout_check_required() { + let pred = >::availability_timeout_predicate(); >::collect_pending(pred) } else { Vec::new() @@ -320,7 +317,7 @@ impl Pallet { /// /// When called from `create_inherent` the `context` must be set to /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent - /// is not overweight. + /// is not overweight. /// It is **mandatory** that calls from `enter` set `context` to /// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked. /// @@ -583,7 +580,10 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); - let scheduled = >::update_claimqueue(freed, now); + >::update_claimqueue(freed, now); + let scheduled = >::scheduled_paras() + .map(|(core_idx, para_id)| (para_id, core_idx)) + .collect(); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); @@ -608,7 +608,7 @@ impl Pallet { .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate) .is_err() }, - &scheduled[..], + &scheduled, ); METRICS.on_candidates_sanitized(backed_candidates.len() as u64); @@ -620,7 +620,7 @@ impl Pallet { } = >::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - scheduled, + &scheduled, >::group_validators, )?; // Note which of the scheduled cores were actually occupied by a backed candidate. @@ -917,7 +917,7 @@ fn sanitize_backed_candidates< >( mut backed_candidates: Vec>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &[CoreAssignment>], + scheduled: &BTreeMap, ) -> Vec> { // Remove any candidates that were concluded invalid. // This does not assume sorting. 
@@ -925,11 +925,6 @@ fn sanitize_backed_candidates< !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) }); - let scheduled_paras_to_core_idx = scheduled - .into_iter() - .map(|core_assignment| (core_assignment.paras_entry.para_id(), core_assignment.core)) - .collect::>(); - // Assure the backed candidate's `ParaId`'s core is free. // This holds under the assumption that `Scheduler::schedule` is called _before_. // We don't check the relay-parent because this is done in the closure when @@ -938,7 +933,7 @@ fn sanitize_backed_candidates< backed_candidates.retain(|backed_candidate| { let desc = backed_candidate.descriptor(); - scheduled_paras_to_core_idx.get(&desc.para_id).is_some() + scheduled.get(&desc.para_id).is_some() }); // Sort the `Vec` last, once there is a guarantee that these @@ -948,8 +943,7 @@ fn sanitize_backed_candidates< // but also allows this to be done in place. backed_candidates.sort_by(|x, y| { // Never panics, since we filtered all panic arguments out in the previous `fn retain`. - scheduled_paras_to_core_idx[&x.descriptor().para_id] - .cmp(&scheduled_paras_to_core_idx[&y.descriptor().para_id]) + scheduled[&x.descriptor().para_id].cmp(&scheduled[&y.descriptor().para_id]) }); backed_candidates diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index ab515cb37565..7c70fcea1943 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -963,10 +963,7 @@ mod sanitizers { use crate::mock::Test; use keyring::Sr25519Keyring; - use primitives::{ - v5::{Assignment, ParasEntry}, - PARACHAIN_KEY_TYPE_ID, - }; + use primitives::PARACHAIN_KEY_TYPE_ID; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; @@ -1239,21 +1236,10 @@ mod sanitizers { let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - let entry_ttl = 10_000; let scheduled = (0_usize..2) .into_iter() - .map(|idx| { - let core_idx = CoreIndex::from(idx as u32); - let ca = CoreAssignment { - paras_entry: ParasEntry::new( - Assignment::new(ParaId::from(1_u32 + idx as u32)), - entry_ttl, - ), - core: core_idx, - }; - ca - }) - .collect::>(); + .map(|idx| (ParaId::from(1_u32 + idx as u32), CoreIndex::from(idx as u32))) + .collect::>(); let group_validators = |group_index: GroupIndex| { match group_index { @@ -1304,7 +1290,7 @@ mod sanitizers { // nothing is scheduled, so no paraids match, thus all backed candidates are skipped { - let scheduled = &Vec::new(); + let scheduled = &BTreeMap::new(); assert!(sanitize_backed_candidates::( backed_candidates.clone(), has_concluded_invalid, diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs index bac1268f53bd..46a609e0368d 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs @@ -18,17 +18,17 @@ //! functions. 
use crate::{ - configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler, + disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, + scheduler::{self, CoreOccupied}, session_info, shared, }; use frame_system::pallet_prelude::*; use primitives::{ slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreIndex, CoreOccupied, CoreState, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -52,29 +52,15 @@ pub fn validator_groups( /// Implementation for the `availability_cores` function of the runtime API. pub fn availability_cores() -> Vec>> { let cores = >::availability_cores(); - let config = >::config(); let now = >::block_number() + One::one(); - let rotation_info = >::group_rotation_info(now); - let time_out_at = |backed_in_number, availability_period| { - let time_out_at = backed_in_number + availability_period; - - let current_window = rotation_info.last_rotation_at() + availability_period; - let next_rotation = rotation_info.next_rotation_at(); - - // If we are within `period` blocks of rotation, timeouts are being checked - // actively. We could even time out this block. - if time_out_at < current_window { - time_out_at - } else if time_out_at <= next_rotation { - // Otherwise, it will time out at the sooner of the next rotation - next_rotation - } else { - // or the scheduled time-out. This is by definition within `period` blocks - // of `next_rotation` and is thus a valid timeout block. - time_out_at - } - }; + // This explicit update is only strictly required for session boundaries: + // + // At the end of a session we clear the claim queues: Without this update call, nothing would be + // scheduled to the client. + >::update_claimqueue(Vec::new(), now); + + let time_out_for = >::availability_timeout_predicate(); let group_responsible_for = |backed_in_number, core_index| match >::group_assigned_to_core( @@ -93,7 +79,9 @@ pub fn availability_cores() -> Vec = cores + let scheduled: BTreeMap<_, _> = >::scheduled_paras().collect(); + + cores .into_iter() .enumerate() .map(|(i, core)| match core { @@ -108,7 +96,7 @@ pub fn availability_cores() -> Vec>::next_up_on_time_out(CoreIndex( i as u32, )), @@ -121,19 +109,15 @@ pub fn availability_cores() -> Vec CoreState::Free, + CoreOccupied::Free => { + if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() { + CoreState::Scheduled(primitives::ScheduledCore { para_id, collator: None }) + } else { + CoreState::Free + } + }, }) - .collect(); - - // This will overwrite only `Free` cores if the scheduler module is working as intended. 
- for scheduled in >::scheduled_claimqueue() { - core_states[scheduled.core.0 as usize] = CoreState::Scheduled(primitives::ScheduledCore { - para_id: scheduled.paras_entry.para_id(), - collator: None, - }); - } - - core_states + .collect() } /// Returns current block number being processed and the corresponding root hash. diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 577bcd153b5b..60b2a9254600 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -39,11 +39,11 @@ use crate::{configuration, initializer::SessionChangeNotification, paras}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; +pub use polkadot_core_primitives::v2::BlockNumber; use primitives::{ - v5::ParasEntry, CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, - ScheduledCore, ValidatorIndex, + CoreIndex, GroupIndex, GroupRotationInfo, Id as ParaId, ScheduledCore, ValidatorIndex, }; -use sp_runtime::traits::{One, Saturating}; +use sp_runtime::traits::One; use sp_std::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, prelude::*, @@ -51,7 +51,7 @@ use sp_std::{ pub mod common; -use common::{AssignmentProvider, AssignmentProviderConfig, CoreAssignment, FreedReason}; +use common::{Assignment, AssignmentProvider, AssignmentProviderConfig}; pub use pallet::*; @@ -101,6 +101,36 @@ pub mod pallet { pub(crate) type AvailabilityCores = StorageValue<_, Vec>>, ValueQuery>; + /// Representation of a core in `AvailabilityCores`. + /// + /// This is not to be confused with `CoreState` which is an enriched variant of this and exposed + /// to the node side. It also provides information about scheduled/upcoming assignments for + /// example and is computed on the fly in the `availability_cores` runtime call. + #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub enum CoreOccupied { + /// No candidate is waiting availability on this core right now (the core is not occupied). + Free, + /// A para is currently waiting for availability/inclusion on this core. + Paras(ParasEntry), + } + + impl CoreOccupied { + /// Is core free? + pub fn is_free(&self) -> bool { + matches!(self, Self::Free) + } + } + + /// Reasons a core might be freed. + #[derive(Clone, Copy)] + pub enum FreedReason { + /// The core's work concluded and the parablock assigned to it is considered available. + Concluded, + /// The core's work timed out. + TimedOut, + } + /// The block number where the session start occurred. Used to track how many group rotations /// have occurred. /// @@ -124,6 +154,68 @@ pub mod pallet { BTreeMap>>>>, ValueQuery, >; + + /// Assignments as tracked in the claim queue. + #[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)] + pub struct ParasEntry { + /// The underlying `Assignment` + pub assignment: Assignment, + /// The number of times the entry has timed out in availability already. + pub availability_timeouts: u32, + /// The block height until this entry needs to be backed. + /// + /// If missed the entry will be removed from the claim queue without ever having occupied + /// the core. + pub ttl: N, + } + + impl ParasEntry { + /// Return `Id` from the underlying `Assignment`. + pub fn para_id(&self) -> ParaId { + self.assignment.para_id + } + + /// Create a new `ParasEntry`. 
+ pub fn new(assignment: Assignment, now: N) -> Self { + ParasEntry { assignment, availability_timeouts: 0, ttl: now } + } + } + + /// How a core is mapped to a backing group and a `ParaId` + #[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] + #[cfg_attr(feature = "std", derive(Debug))] + pub struct CoreAssignment { + /// The core that is assigned. + pub core: CoreIndex, + /// The para id and accompanying information needed to collate and back a parablock. + pub paras_entry: ParasEntry, + } + + impl CoreAssignment { + /// Returns the [`ParaId`] of the assignment. + pub fn para_id(&self) -> ParaId { + self.paras_entry.para_id() + } + + /// Returns the inner [`ParasEntry`] of the assignment. + pub fn to_paras_entry(self) -> ParasEntry { + self.paras_entry + } + } + + /// Availability timeout status of a core. + pub(crate) struct AvailabilityTimeoutStatus { + /// Is the core already timed out? + /// + /// If this is true the core will be freed at this block. + pub timed_out: bool, + + /// When does this core timeout. + /// + /// The block number the core times out. If `timed_out` is true, this will correspond to + /// now (current block number). + pub live_until: BlockNumber, + } } type PositionInClaimqueue = u32; @@ -368,50 +460,47 @@ impl Pallet { Some(GroupIndex(group_idx as u32)) } - /// Returns an optional predicate that should be used for timing out occupied cores. - /// - /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and - /// the block number since which it has been occupied, and the respective parachain timeouts, - /// i.e. only within `config.paras_availability_period` of the last rotation would this return - /// `Some`, unless there are no rotations. + /// Returns a predicate that should be used for timing out occupied cores. /// - /// The timeout used to depend, but does not depend any more on group rotations. First of all - /// it only matters if a para got another chance (a retry). If there is a retry and it happens - /// still within the same group rotation a censoring backing group would need to censor again - /// and lose out again on backing rewards. This is bad for the censoring backing group, it does - /// not matter for the parachain as long as it is retried often enough (so it eventually gets a - /// try on another backing group) - the effect is similar to having a prolonged timeout. It - /// should also be noted that for both malicious and offline backing groups it is actually more - /// realistic that the candidate will not be backed to begin with, instead of getting backed - /// and then not made available. + /// This only ever times out cores that have been occupied across a group rotation boundary. pub(crate) fn availability_timeout_predicate( - ) -> Option) -> bool> { - let now = >::block_number(); + ) -> impl Fn(BlockNumberFor) -> AvailabilityTimeoutStatus> { let config = >::config(); - let session_start = >::get(); + let now = >::block_number(); + let rotation_info = Self::group_rotation_info(now); - let blocks_since_session_start = now.saturating_sub(session_start); - let blocks_since_last_rotation = - blocks_since_session_start % config.group_rotation_frequency.max(1u8.into()); + let next_rotation = rotation_info.next_rotation_at(); - if blocks_since_last_rotation >= config.paras_availability_period { - None - } else { - Some(|core_index: CoreIndex, pending_since| { - let availability_cores = AvailabilityCores::::get(); - let AssignmentProviderConfig { availability_period, .. 
} = - T::AssignmentProvider::get_provider_config(core_index); - let now = >::block_number(); - match availability_cores.get(core_index.0 as usize) { - None => true, // out-of-bounds, doesn't really matter what is returned. - Some(CoreOccupied::Free) => true, // core free, still doesn't matter. - Some(CoreOccupied::Paras(_)) => - now.saturating_sub(pending_since) >= availability_period, - } - }) + let times_out = Self::availability_timeout_check_required(); + + move |pending_since| { + let time_out_at = if times_out { + // We are at the beginning of the rotation, here availability period is relevant. + // Note: blocks backed in this rotation will never time out here as backed_in + + // config.paras_availability_period will always be > now for these blocks, as + // otherwise above condition would not be true. + pending_since + config.paras_availability_period + } else { + next_rotation + config.paras_availability_period + }; + + AvailabilityTimeoutStatus { timed_out: time_out_at <= now, live_until: time_out_at } } } + /// Is evaluation of `availability_timeout_predicate` necessary at the current block? + /// + /// This can be used to avoid calling `availability_timeout_predicate` for each core in case + /// this function returns false. + pub(crate) fn availability_timeout_check_required() -> bool { + let config = >::config(); + let now = >::block_number() + One::one(); + let rotation_info = Self::group_rotation_info(now); + + let current_window = rotation_info.last_rotation_at() + config.paras_availability_period; + now < current_window + } + /// Returns a helper for determining group rotation. pub(crate) fn group_rotation_info( now: BlockNumberFor, @@ -508,7 +597,7 @@ impl Pallet { pub(crate) fn update_claimqueue( just_freed_cores: impl IntoIterator, now: BlockNumberFor, - ) -> Vec>> { + ) { Self::move_claimqueue_forward(); Self::free_cores_and_fill_claimqueue(just_freed_cores, now) } @@ -534,61 +623,58 @@ impl Pallet { fn free_cores_and_fill_claimqueue( just_freed_cores: impl IntoIterator, now: BlockNumberFor, - ) -> Vec>> { + ) { let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores); // This can only happen on new sessions at which we move all assignments back to the // provider. Hence, there's nothing we need to do here. if ValidatorGroups::::get().is_empty() { - vec![] - } else { - let n_lookahead = Self::claimqueue_lookahead(); - let n_session_cores = T::AssignmentProvider::session_core_count(); - let cq = ClaimQueue::::get(); - let ttl = >::config().on_demand_ttl; - - for core_idx in 0..n_session_cores { - let core_idx = CoreIndex::from(core_idx); - - // add previously timedout paras back into the queue - if let Some(mut entry) = timedout_paras.remove(&core_idx) { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core_idx); - if entry.availability_timeouts < max_availability_timeouts { - // Increment the timeout counter. - entry.availability_timeouts += 1; - // Reset the ttl so that a timed out assignment. - entry.ttl = now + ttl; - Self::add_to_claimqueue(core_idx, entry); - // The claim has been added back into the claimqueue. - // Do not pop another assignment for the core. 
- continue - } else { - // Consider timed out assignments for on demand parachains as concluded for - // the assignment provider - let ret = concluded_paras.insert(core_idx, entry.para_id()); - debug_assert!(ret.is_none()); - } + return + } + let n_lookahead = Self::claimqueue_lookahead(); + let n_session_cores = T::AssignmentProvider::session_core_count(); + let cq = ClaimQueue::::get(); + let ttl = >::config().on_demand_ttl; + + for core_idx in 0..n_session_cores { + let core_idx = CoreIndex::from(core_idx); + + // add previously timedout paras back into the queue + if let Some(mut entry) = timedout_paras.remove(&core_idx) { + let AssignmentProviderConfig { max_availability_timeouts, .. } = + T::AssignmentProvider::get_provider_config(core_idx); + if entry.availability_timeouts < max_availability_timeouts { + // Increment the timeout counter. + entry.availability_timeouts += 1; + // Reset the ttl so that a timed out assignment. + entry.ttl = now + ttl; + Self::add_to_claimqueue(core_idx, entry); + // The claim has been added back into the claimqueue. + // Do not pop another assignment for the core. + continue + } else { + // Consider timed out assignments for on demand parachains as concluded for + // the assignment provider + let ret = concluded_paras.insert(core_idx, entry.para_id()); + debug_assert!(ret.is_none()); } + } - // We consider occupied cores to be part of the claimqueue - let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32) + - if Self::is_core_occupied(core_idx) { 1 } else { 0 }; - for _ in n_lookahead_used..n_lookahead { - let concluded_para = concluded_paras.remove(&core_idx); - if let Some(assignment) = - T::AssignmentProvider::pop_assignment_for_core(core_idx, concluded_para) - { - Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl)); - } + // We consider occupied cores to be part of the claimqueue + let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32) + + if Self::is_core_occupied(core_idx) { 1 } else { 0 }; + for _ in n_lookahead_used..n_lookahead { + let concluded_para = concluded_paras.remove(&core_idx); + if let Some(assignment) = + T::AssignmentProvider::pop_assignment_for_core(core_idx, concluded_para) + { + Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl)); } } - - debug_assert!(timedout_paras.is_empty()); - debug_assert!(concluded_paras.is_empty()); - - Self::scheduled_claimqueue() } + + debug_assert!(timedout_paras.is_empty()); + debug_assert!(concluded_paras.is_empty()); } fn is_core_occupied(core_idx: CoreIndex) -> bool { @@ -623,29 +709,22 @@ impl Pallet { .ok_or("remove returned None")? .ok_or("Element in Claimqueue was None.")?; - // Since the core is now occupied, the next entry in the claimqueue in order to achieve - // 12 second block times needs to be None - if core_claims.front() != Some(&None) { - core_claims.push_front(None); - } Ok((pos as u32, pe)) }) } - // TODO: Temporary to imitate the old schedule() call. Will be adjusted when we make the - // scheduler AB ready - pub(crate) fn scheduled_claimqueue() -> Vec>> { + /// Paras scheduled next in the claim queue. + pub(crate) fn scheduled_paras() -> impl Iterator { + Self::scheduled_entries().map(|(core_idx, e)| (core_idx, e.assignment.para_id)) + } + + /// Internal access to entries at the top of the claim queue. 
+ fn scheduled_entries() -> impl Iterator>)> { let claimqueue = ClaimQueue::::get(); claimqueue .into_iter() - .flat_map(|(core_idx, v)| { - v.front() - .cloned() - .flatten() - .map(|pe| CoreAssignment { core: core_idx, paras_entry: pe }) - }) - .collect() + .filter_map(|(core_idx, v)| v.front().cloned().flatten().map(|e| (core_idx, e))) } #[cfg(any(feature = "runtime-benchmarks", test))] diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 0e8e8338b17b..316e8e3b760c 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -17,10 +17,7 @@ //! Common traits and types used by the scheduler and assignment providers. use frame_support::pallet_prelude::*; -use primitives::{ - v5::{Assignment, ParasEntry}, - CoreIndex, Id as ParaId, -}; +use primitives::{CoreIndex, Id as ParaId}; use scale_info::TypeInfo; use sp_std::prelude::*; @@ -28,21 +25,22 @@ use sp_std::prelude::*; #[allow(unused)] use crate::configuration::HostConfiguration; -/// Reasons a core might be freed -#[derive(Clone, Copy)] -pub enum FreedReason { - /// The core's work concluded and the parablock assigned to it is considered available. - Concluded, - /// The core's work timed out. - TimedOut, +/// An assignment for a parachain scheduled to be backed and included in a relay chain block. +#[derive(Clone, Encode, Decode, PartialEq, TypeInfo, RuntimeDebug)] +pub struct Assignment { + /// Assignment's ParaId + pub para_id: ParaId, +} + +impl Assignment { + /// Create a new `Assignment`. + pub fn new(para_id: ParaId) -> Self { + Self { para_id } + } } /// A set of variables required by the scheduler in order to operate. pub struct AssignmentProviderConfig { - /// The availability period specified by the implementation. - /// See [`HostConfiguration::paras_availability_period`] for more information. - pub availability_period: BlockNumber, - /// How many times a collation can time out on availability. /// Zero timeouts still means that a collation can be provided as per the slot auction /// assignment provider. @@ -72,25 +70,3 @@ pub trait AssignmentProvider { /// Returns a set of variables needed by the scheduler fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig; } - -/// How a core is mapped to a backing group and a `ParaId` -#[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug))] -pub struct CoreAssignment { - /// The core that is assigned. - pub core: CoreIndex, - /// The para id and accompanying information needed to collate and back a parablock. - pub paras_entry: ParasEntry, -} - -impl CoreAssignment { - /// Returns the [`ParaId`] of the assignment. - pub fn para_id(&self) -> ParaId { - self.paras_entry.para_id() - } - - /// Returns the inner [`ParasEntry`] of the assignment. 
- pub fn to_paras_entry(self) -> ParasEntry { - self.paras_entry - } -} diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 32ac9deaf68f..accff7016ed1 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -20,7 +20,6 @@ use super::*; use frame_support::{ pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, }; -use primitives::vstaging::Assignment; mod v0 { use super::*; diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index e203531ca49d..108f365d6b5c 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -18,7 +18,7 @@ use super::*; use frame_support::assert_ok; use keyring::Sr25519Keyring; -use primitives::{v5::Assignment, BlockNumber, SessionIndex, ValidationCode, ValidatorId}; +use primitives::{BlockNumber, SessionIndex, ValidationCode, ValidatorId}; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ @@ -427,33 +427,27 @@ fn fill_claimqueue_fills() { { assert_eq!(Scheduler::claimqueue_len(), 2 * lookahead); - let scheduled = Scheduler::scheduled_claimqueue(); + let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); // Cannot assert on indices anymore as they depend on the assignment providers assert!(claimqueue_contains_para_ids::(vec![chain_a, chain_b])); assert_eq!( - scheduled[0], - CoreAssignment { - core: CoreIndex(0), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 6 - }, - } + scheduled.get(&CoreIndex(0)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 6 + }, ); assert_eq!( - scheduled[1], - CoreAssignment { - core: CoreIndex(1), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_b }, - availability_timeouts: 0, - ttl: 6 - }, - } + scheduled.get(&CoreIndex(1)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: chain_b }, + availability_timeouts: 0, + ttl: 6 + }, ); } @@ -481,42 +475,33 @@ fn fill_claimqueue_fills() { { assert_eq!(Scheduler::claimqueue_len(), 5); - let scheduled = Scheduler::scheduled_claimqueue(); + let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); assert_eq!( - scheduled[0], - CoreAssignment { - core: CoreIndex(0), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 6 - }, - } + scheduled.get(&CoreIndex(0)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 6 + }, ); assert_eq!( - scheduled[1], - CoreAssignment { - core: CoreIndex(1), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_b }, - availability_timeouts: 0, - ttl: 6 - }, - } + scheduled.get(&CoreIndex(1)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: chain_b }, + availability_timeouts: 0, + ttl: 6 + }, ); // Was added a block later, note the TTL. 
assert_eq!( - scheduled[2], - CoreAssignment { - core: CoreIndex(2), - paras_entry: ParasEntry { - assignment: Assignment { para_id: thread_a }, - availability_timeouts: 0, - ttl: 7 - }, - } + scheduled.get(&CoreIndex(2)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: thread_a }, + availability_timeouts: 0, + ttl: 7 + }, ); // Sits on the same core as `thread_a` assert_eq!( @@ -528,15 +513,12 @@ fn fill_claimqueue_fills() { }) ); assert_eq!( - scheduled[3], - CoreAssignment { - core: CoreIndex(3), - paras_entry: ParasEntry { - assignment: Assignment { para_id: thread_c }, - availability_timeouts: 0, - ttl: 7 - }, - } + scheduled.get(&CoreIndex(3)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: thread_c }, + availability_timeouts: 0, + ttl: 7 + }, ); } }); @@ -608,7 +590,7 @@ fn schedule_schedules_including_just_freed() { let mut now = 2; run_to_block(now, |_| None); - assert_eq!(Scheduler::scheduled_claimqueue().len(), 4); + assert_eq!(Scheduler::scheduled_paras().collect::>().len(), 4); // cores 0, 1, 2, and 3 should be occupied. mark them as such. let mut occupied_map: BTreeMap = BTreeMap::new(); @@ -630,7 +612,7 @@ fn schedule_schedules_including_just_freed() { // core 4 is free assert!(cores[4] == CoreOccupied::Free); - assert!(Scheduler::scheduled_claimqueue().is_empty()); + assert!(Scheduler::scheduled_paras().collect::>().is_empty()); // All core index entries in the claimqueue should have `None` in them. Scheduler::claimqueue().iter().for_each(|(_core_idx, core_queue)| { @@ -657,21 +639,18 @@ fn schedule_schedules_including_just_freed() { run_to_block(now, |_| None); { - let scheduled = Scheduler::scheduled_claimqueue(); + let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); // cores 0 and 1 are occupied by lease holding parachains. cores 2 and 3 are occupied by // on-demand parachain claims. core 4 was free. assert_eq!(scheduled.len(), 1); assert_eq!( - scheduled[0], - CoreAssignment { - core: CoreIndex(4), - paras_entry: ParasEntry { - assignment: Assignment { para_id: thread_b }, - availability_timeouts: 0, - ttl: 8 - }, - } + scheduled.get(&CoreIndex(4)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 8 + }, ); } @@ -686,54 +665,42 @@ fn schedule_schedules_including_just_freed() { Scheduler::update_claimqueue(just_updated, now); { - let scheduled = Scheduler::scheduled_claimqueue(); + let scheduled: BTreeMap<_, _> = Scheduler::scheduled_entries().collect(); // 1 thing scheduled before, + 3 cores freed. assert_eq!(scheduled.len(), 4); assert_eq!( - scheduled[0], - CoreAssignment { - core: CoreIndex(0), - paras_entry: ParasEntry { - assignment: Assignment { para_id: chain_a }, - availability_timeouts: 0, - ttl: 8 - }, - } + scheduled.get(&CoreIndex(0)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 8 + }, ); assert_eq!( - scheduled[1], - CoreAssignment { - core: CoreIndex(2), - paras_entry: ParasEntry { - assignment: Assignment { para_id: thread_d }, - availability_timeouts: 0, - ttl: 8 - }, - } + scheduled.get(&CoreIndex(2)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: thread_d }, + availability_timeouts: 0, + ttl: 8 + }, ); // Although C was descheduled, the core `4` was occupied so C goes back to the queue. 
assert_eq!( - scheduled[2], - CoreAssignment { - core: CoreIndex(3), - paras_entry: ParasEntry { - assignment: Assignment { para_id: thread_c }, - availability_timeouts: 1, - ttl: 8 - }, - } + scheduled.get(&CoreIndex(3)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: thread_c }, + availability_timeouts: 1, + ttl: 8 + }, ); assert_eq!( - scheduled[3], - CoreAssignment { - core: CoreIndex(4), - paras_entry: ParasEntry { - assignment: Assignment { para_id: thread_b }, - availability_timeouts: 0, - ttl: 8 - }, - } + scheduled.get(&CoreIndex(4)).unwrap(), + &ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 8 + }, ); // The only assignment yet to be popped on to the claim queue is `thread_e`. @@ -900,14 +867,14 @@ fn schedule_rotates_groups() { run_to_block(now, |_| None); let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor| { - let scheduled = Scheduler::scheduled_claimqueue(); + let scheduled: BTreeMap<_, _> = Scheduler::scheduled_paras().collect(); assert_eq!(scheduled.len(), 2); assert_eq!( - Scheduler::group_assigned_to_core(scheduled[0].core, *now).unwrap(), + Scheduler::group_assigned_to_core(CoreIndex(0), *now).unwrap(), GroupIndex((0u32 + rotations) % on_demand_cores) ); assert_eq!( - Scheduler::group_assigned_to_core(scheduled[1].core, *now).unwrap(), + Scheduler::group_assigned_to_core(CoreIndex(1), *now).unwrap(), GroupIndex((1u32 + rotations) % on_demand_cores) ); }; @@ -999,7 +966,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { ] .into_iter() .collect(); - let core_assignments = Scheduler::update_claimqueue(just_updated, now); + Scheduler::update_claimqueue(just_updated, now); // ParaId a exists in the claim queue until max_retries is reached. if n < max_retries + now { @@ -1008,13 +975,9 @@ fn on_demand_claims_are_pruned_after_timing_out() { assert!(!claimqueue_contains_para_ids::(vec![thread_a])); } - // Occupy the cores based on the output of update_claimqueue. - Scheduler::occupied( - core_assignments - .iter() - .map(|core_assignment| (core_assignment.core, core_assignment.para_id())) - .collect(), - ); + let core_assignments = Scheduler::scheduled_paras().collect(); + // Occupy the cores based on the result of update_claimqueue. + Scheduler::occupied(core_assignments); } // ParaId a does not exist in the claimqueue/availability_cores after @@ -1054,7 +1017,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { } } - let core_assignments = Scheduler::update_claimqueue(just_updated, now); + Scheduler::update_claimqueue(just_updated, now); // ParaId a exists in the claim queue until groups are rotated. if n < 31 { @@ -1063,13 +1026,9 @@ fn on_demand_claims_are_pruned_after_timing_out() { assert!(!claimqueue_contains_para_ids::(vec![thread_a])); } - // Occupy the cores based on the output of update_claimqueue. - Scheduler::occupied( - core_assignments - .iter() - .map(|core_assignment| (core_assignment.core, core_assignment.para_id())) - .collect(), - ); + let core_assignments = Scheduler::scheduled_paras().collect(); + // Occupy the cores based on the result of update_claimqueue. 
+ Scheduler::occupied(core_assignments); } // ParaId a does not exist in the claimqueue/availability_cores after @@ -1124,33 +1083,25 @@ fn availability_predicate_works() { run_to_block(1 + paras_availability_period, |_| None); - assert!(Scheduler::availability_timeout_predicate().is_none()); + assert!(!Scheduler::availability_timeout_check_required()); run_to_block(1 + group_rotation_frequency, |_| None); { - let pred = Scheduler::availability_timeout_predicate() - .expect("predicate exists recently after rotation"); - let now = System::block_number(); - let would_be_timed_out = now - paras_availability_period; - for i in 0..AvailabilityCores::::get().len() { - // returns true for unoccupied cores. - // And can time out paras at this stage. - assert!(pred(CoreIndex(i as u32), would_be_timed_out)); - } + assert!(Scheduler::availability_timeout_check_required()); + let pred = Scheduler::availability_timeout_predicate(); + let last_rotation = Scheduler::group_rotation_info(now).last_rotation_at(); - assert!(!pred(CoreIndex(0), now)); - assert!(!pred(CoreIndex(1), now)); - assert!(pred(CoreIndex(2), now)); + let would_be_timed_out = now - paras_availability_period; + let should_not_be_timed_out = last_rotation; - // check the tight bound. - assert!(pred(CoreIndex(0), now - paras_availability_period)); - assert!(pred(CoreIndex(1), now - paras_availability_period)); + assert!(pred(would_be_timed_out).timed_out); + assert!(!pred(should_not_be_timed_out).timed_out); + assert!(!pred(now).timed_out); // check the threshold is exact. - assert!(!pred(CoreIndex(0), now - paras_availability_period + 1)); - assert!(!pred(CoreIndex(1), now - paras_availability_period + 1)); + assert!(!pred(would_be_timed_out + 1).timed_out); } run_to_block(1 + group_rotation_frequency + paras_availability_period, |_| None); From 0090152386c32740b223e7c1aea76cb3df013332 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Wed, 6 Sep 2023 19:34:41 +0200 Subject: [PATCH 26/28] Prevent a fail prdoc check to block (#1433) --- .github/workflows/check-prdoc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 219952fdbfba..323be1d4e06e 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -48,4 +48,4 @@ jobs: if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} run: | echo "Checking for PR#${GITHUB_PR} in $MOUNT" - $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n ${GITHUB_PR} + $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n ${GITHUB_PR} || true From a47943983f18ce00ceab3052bf2f9a2c62f80327 Mon Sep 17 00:00:00 2001 From: ordian Date: Wed, 6 Sep 2023 20:39:42 +0200 Subject: [PATCH 27/28] zombienet: use another collator image for the slashing test (#1386) * zombienet: use test-parachain image for the slashing test * use the right image * try polkadot-parachain image * try naming collator alice :see_no_evil: * add needed job for the pipeline * fix user id in polkadot-parachain-debug image * small tweaks to the test * another small tweak * yet another small tweak * bump zombienet version --------- Co-authored-by: Javier Viola --- .gitlab-ci.yml | 2 +- .gitlab/pipeline/zombienet/polkadot.yml | 4 ++++ .../polkadot-parachain-debug_unsigned_injected.Dockerfile | 2 +- .../functional/0005-parachains-disputes-past-session.toml | 7 +++---- .../functional/0005-parachains-disputes-past-session.zndsl | 6 +++--- 5 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 
2e0465ba1eb1..748db808de69 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.65" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.67" DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" default: diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 87b821742c67..349807a610d4 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -9,6 +9,7 @@ - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}" - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} + - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - export MALUS_IMAGE="${MALUS_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" @@ -16,6 +17,7 @@ - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - echo "polkadot secondary image ${ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}" - echo "colander image ${COL_IMAGE}" + - echo "cumulus image ${CUMULUS_IMAGE}" - echo "malus image ${MALUS_IMAGE}" stage: zombienet image: "${ZOMBIENET_IMAGE}" @@ -28,6 +30,8 @@ artifacts: true - job: build-push-image-colander artifacts: true + - job: build-push-image-polkadot-parachain-debug + artifacts: true extends: - .kubernetes-env - .zombienet-refs diff --git a/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile b/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile index 7a2202d9c52b..75cc2b9e629d 100644 --- a/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile +++ b/docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update && \ apt-get clean && \ find /var/lib/apt/lists/ -type f -not -name lock -delete; \ # add user and link ~/.local/share/polkadot-parachain to /data - useradd -m -u 10000 -U -s /bin/sh -d /polkadot-parachain polkadot-parachain && \ + useradd -m -u 1000 -U -s /bin/sh -d /polkadot-parachain polkadot-parachain && \ mkdir -p /data /polkadot-parachain/.local/share && \ chown -R polkadot-parachain:polkadot-parachain /data && \ ln -s /data /polkadot-parachain/.local/share/polkadot-parachain && \ diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml index 50c465950f72..25d922bb6827 100644 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml +++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml @@ -5,7 +5,7 @@ bootnode = true [relaychain.genesis.runtime.configuration.config] max_validators_per_core = 1 needed_approvals = 2 - group_rotation_frequency = 3 + group_rotation_frequency = 2 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" @@ -39,8 +39,7 @@ id = 1000 cumulus_based = true [parachains.collator] - name = "collator" + name = "alice" command = "polkadot-parachain" - image = "docker.io/parity/polkadot-parachain:latest" - # image = "{{COL_IMAGE}}" + image = "{{CUMULUS_IMAGE}}" args = ["-lparachain=debug"] diff --git 
a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl index bc3674f4f53d..a3f1f0669ac9 100644 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl +++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl @@ -21,18 +21,18 @@ malus-validator: resume malus-validator: log line matches "Suggesting malicious candidate" within 200 seconds # Pause first flaky node -# Availability and finality will continue with 3/4 nodes online (incl. malus) +# Availability will continue with 3/4 nodes online (incl. malus) honest-flaky-validator-0: pause # Wait for the dispute -honest-flaky-validator-1: reports parachain_candidate_disputes_total is at least 1 within 40 seconds +honest-flaky-validator-1: reports parachain_candidate_disputes_total is at least 1 within 60 seconds # Pause second flaky node so that we do not revert blocks due to f+1 invalid votes # Availability and finality will stop honest-flaky-validator-1: pause # Wait for 1 full session to pass after the last unconcluded dispute. -sleep 120 seconds +sleep 110 seconds # Now resume flaky validators honest-flaky-validator: resume From 91deee7a1dba52e5e73d1a97d9fd5b8ad1e916a4 Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Thu, 7 Sep 2023 08:05:31 +0530 Subject: [PATCH 28/28] Adds base benchmark for do_tick in broker pallet (#1235) * Adds base benchmark for do_tick * ".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime=dev --target_dir=substrate --pallet=pallet_broker * Update substrate/frame/broker/src/benchmarking.rs Co-authored-by: Oliver Tale-Yazdi * Update substrate/frame/broker/src/benchmarking.rs * ".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime=dev --target_dir=substrate --pallet=pallet_broker * Addresses review comment --------- Co-authored-by: command-bot <> Co-authored-by: Oliver Tale-Yazdi --- substrate/frame/broker/src/benchmarking.rs | 21 ++ substrate/frame/broker/src/tick_impls.rs | 7 +- substrate/frame/broker/src/weights.rs | 367 ++++++++++++--------- 3 files changed, 239 insertions(+), 156 deletions(-) diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 663bf2f466cf..d22f3936c3e2 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -852,6 +852,27 @@ mod benches { } } + #[benchmark] + fn do_tick_base() -> Result<(), BenchmarkError> { + setup_and_start_sale::()?; + + advance_to::(5); + + let mut status = Status::::get().unwrap(); + status.last_committed_timeslice = 3; + Status::::put(&status); + + #[block] + { + Broker::::do_tick(); + } + + let updated_status = Status::::get().unwrap(); + assert_eq!(status, updated_status); + + Ok(()) + } + // Implements a test for each benchmark. Execute with: // `cargo test -p pallet-broker --features runtime-benchmarks`. 
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index a1a50a61908d..7df8bd39d42f 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -36,13 +36,14 @@ impl Pallet { /// - Request revenue information for a previous timeslice /// - Initialize an instantaneous core pool historical revenue record pub(crate) fn do_tick() -> Weight { + let mut meter = WeightMeter::new(); + meter.consume(T::WeightInfo::do_tick_base()); + let (mut status, config) = match (Status::::get(), Configuration::::get()) { (Some(s), Some(c)) => (s, c), - _ => return Weight::zero(), + _ => return meter.consumed(), }; - let mut meter = WeightMeter::new(); - if Self::process_core_count(&mut status) { meter.consume(T::WeightInfo::process_core_count(status.core_count.into())); } diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index 93b568bf2a03..b3a151c6062c 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,10 +18,10 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `runner-pzhd7p6z-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: // target/production/substrate-node @@ -32,12 +32,12 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_broker // --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/broker/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/broker/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -74,6 +74,7 @@ pub trait WeightInfo { fn process_pool() -> Weight; fn process_core_schedule() -> Weight; fn request_revenue_info_at() -> Weight; + fn do_tick_base() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -85,8 +86,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_448_000 picoseconds. - Weight::from_parts(3_729_000, 0) + // Minimum execution time: 3_040_000 picoseconds. + Weight::from_parts(3_344_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -95,8 +96,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 22_537_000 picoseconds. - Weight::from_parts(23_335_000, 7496) + // Minimum execution time: 21_259_000 picoseconds. 
+ Weight::from_parts(22_110_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -106,8 +107,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 21_668_000 picoseconds. - Weight::from_parts(22_442_000, 7496) + // Minimum execution time: 20_330_000 picoseconds. + Weight::from_parts(20_826_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -117,8 +118,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_606_000 picoseconds. - Weight::from_parts(14_104_000, 1526) + // Minimum execution time: 13_411_000 picoseconds. + Weight::from_parts(13_960_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -137,12 +138,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(_n: u32, ) -> Weight { + fn start_sales(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 64_012_000 picoseconds. - Weight::from_parts(67_819_922, 8499) + // Minimum execution time: 57_770_000 picoseconds. + Weight::from_parts(61_047_512, 8499) + // Standard Error: 165 + .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -160,8 +163,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `568` // Estimated: `2053` - // Minimum execution time: 48_110_000 picoseconds. - Weight::from_parts(49_234_000, 2053) + // Minimum execution time: 51_196_000 picoseconds. + Weight::from_parts(52_382_000, 2053) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -183,8 +186,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `686` // Estimated: `4698` - // Minimum execution time: 69_580_000 picoseconds. - Weight::from_parts(70_914_000, 4698) + // Minimum execution time: 71_636_000 picoseconds. + Weight::from_parts(73_679_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -194,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 17_687_000 picoseconds. - Weight::from_parts(18_573_000, 3550) + // Minimum execution time: 19_182_000 picoseconds. + Weight::from_parts(19_775_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,8 +208,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_675_000 picoseconds. - Weight::from_parts(20_234_000, 3550) + // Minimum execution time: 20_688_000 picoseconds. 
+ Weight::from_parts(21_557_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -216,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_426_000 picoseconds. - Weight::from_parts(20_414_000, 3550) + // Minimum execution time: 21_190_000 picoseconds. + Weight::from_parts(22_215_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -233,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 31_751_000 picoseconds. - Weight::from_parts(32_966_000, 4681) + // Minimum execution time: 34_591_000 picoseconds. + Weight::from_parts(36_227_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -252,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 36_709_000 picoseconds. - Weight::from_parts(38_930_000, 5996) + // Minimum execution time: 40_346_000 picoseconds. + Weight::from_parts(41_951_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -261,20 +264,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:3 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:0) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `720` + // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 55_510_000 picoseconds. - Weight::from_parts(56_665_061, 6196) - // Standard Error: 61_729 - .saturating_add(Weight::from_parts(1_724_824, 0).saturating_mul(m.into())) + // Minimum execution time: 75_734_000 picoseconds. + Weight::from_parts(78_168_395, 6196) + // Standard Error: 63_180 + .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(m.into())) } /// Storage: `System::Account` (r:1 w:1) @@ -283,8 +286,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 44_992_000 picoseconds. - Weight::from_parts(46_225_000, 3593) + // Minimum execution time: 46_383_000 picoseconds. + Weight::from_parts(47_405_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -296,8 +299,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 28_207_000 picoseconds. - Weight::from_parts(28_707_000, 3550) + // Minimum execution time: 30_994_000 picoseconds. 
+ Weight::from_parts(31_979_000, 3550) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -311,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 31_813_000 picoseconds. - Weight::from_parts(32_612_000, 3533) + // Minimum execution time: 37_584_000 picoseconds. + Weight::from_parts(44_010_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -326,10 +329,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `829` + // Measured: `830` // Estimated: `3593` - // Minimum execution time: 38_571_000 picoseconds. - Weight::from_parts(39_493_000, 3593) + // Minimum execution time: 45_266_000 picoseconds. + Weight::from_parts(48_000_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -341,42 +344,53 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `525` // Estimated: `4698` - // Minimum execution time: 24_714_000 picoseconds. - Weight::from_parts(25_288_000, 4698) + // Minimum execution time: 25_365_000 picoseconds. + Weight::from_parts(26_920_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:0 w:1) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_258_000 picoseconds. - Weight::from_parts(7_925_570, 0) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Minimum execution time: 6_519_000 picoseconds. + Weight::from_parts(7_098_698, 0) + // Standard Error: 20 + .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(_n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `97` - // Estimated: `3562` - // Minimum execution time: 7_136_000 picoseconds. - Weight::from_parts(7_788_194, 3562) + fn process_core_count(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `98` + // Estimated: `3563` + // Minimum execution time: 7_608_000 picoseconds. 
+ Weight::from_parts(8_157_815, 3563) + // Standard Error: 26 + .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Broker::InstaPoolHistory` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_049_000 picoseconds. - Weight::from_parts(6_311_000, 0) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Measured: `905` + // Estimated: `4370` + // Minimum execution time: 59_993_000 picoseconds. + Weight::from_parts(61_752_000, 4370) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -393,10 +407,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 47_504_000 picoseconds. - Weight::from_parts(49_778_098, 8499) - // Standard Error: 109 - .saturating_add(Weight::from_parts(427, 0).saturating_mul(n.into())) + // Minimum execution time: 41_863_000 picoseconds. + Weight::from_parts(44_033_031, 8499) + // Standard Error: 116 + .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -408,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_573_000 picoseconds. - Weight::from_parts(10_034_000, 3493) + // Minimum execution time: 9_588_000 picoseconds. + Weight::from_parts(9_925_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -421,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 21_331_000 picoseconds. - Weight::from_parts(22_235_000, 4681) + // Minimum execution time: 19_308_000 picoseconds. + Weight::from_parts(20_482_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -430,8 +444,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 191_000 picoseconds. - Weight::from_parts(234_000, 0) + // Minimum execution time: 147_000 picoseconds. 
+ Weight::from_parts(184_000, 0) + } + /// Storage: `Broker::Status` (r:1 w:1) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + fn do_tick_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `699` + // Estimated: `4164` + // Minimum execution time: 19_824_000 picoseconds. + Weight::from_parts(20_983_000, 4164) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } } @@ -443,8 +474,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_448_000 picoseconds. - Weight::from_parts(3_729_000, 0) + // Minimum execution time: 3_040_000 picoseconds. + Weight::from_parts(3_344_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -453,8 +484,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 22_537_000 picoseconds. - Weight::from_parts(23_335_000, 7496) + // Minimum execution time: 21_259_000 picoseconds. + Weight::from_parts(22_110_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -464,8 +495,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 21_668_000 picoseconds. - Weight::from_parts(22_442_000, 7496) + // Minimum execution time: 20_330_000 picoseconds. + Weight::from_parts(20_826_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -475,8 +506,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_606_000 picoseconds. - Weight::from_parts(14_104_000, 1526) + // Minimum execution time: 13_411_000 picoseconds. + Weight::from_parts(13_960_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -495,12 +526,14 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(_n: u32, ) -> Weight { + fn start_sales(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 64_012_000 picoseconds. - Weight::from_parts(67_819_922, 8499) + // Minimum execution time: 57_770_000 picoseconds. + Weight::from_parts(61_047_512, 8499) + // Standard Error: 165 + .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -518,8 +551,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `568` // Estimated: `2053` - // Minimum execution time: 48_110_000 picoseconds. 
- Weight::from_parts(49_234_000, 2053) + // Minimum execution time: 51_196_000 picoseconds. + Weight::from_parts(52_382_000, 2053) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -541,8 +574,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `686` // Estimated: `4698` - // Minimum execution time: 69_580_000 picoseconds. - Weight::from_parts(70_914_000, 4698) + // Minimum execution time: 71_636_000 picoseconds. + Weight::from_parts(73_679_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -552,8 +585,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 17_687_000 picoseconds. - Weight::from_parts(18_573_000, 3550) + // Minimum execution time: 19_182_000 picoseconds. + Weight::from_parts(19_775_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -563,8 +596,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_675_000 picoseconds. - Weight::from_parts(20_234_000, 3550) + // Minimum execution time: 20_688_000 picoseconds. + Weight::from_parts(21_557_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -574,8 +607,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_426_000 picoseconds. - Weight::from_parts(20_414_000, 3550) + // Minimum execution time: 21_190_000 picoseconds. + Weight::from_parts(22_215_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -591,8 +624,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 31_751_000 picoseconds. - Weight::from_parts(32_966_000, 4681) + // Minimum execution time: 34_591_000 picoseconds. + Weight::from_parts(36_227_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -610,8 +643,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 36_709_000 picoseconds. - Weight::from_parts(38_930_000, 5996) + // Minimum execution time: 40_346_000 picoseconds. + Weight::from_parts(41_951_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -619,20 +652,20 @@ impl WeightInfo for () { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:3 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:0) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `720` + // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 55_510_000 picoseconds. 
- Weight::from_parts(56_665_061, 6196) - // Standard Error: 61_729 - .saturating_add(Weight::from_parts(1_724_824, 0).saturating_mul(m.into())) + // Minimum execution time: 75_734_000 picoseconds. + Weight::from_parts(78_168_395, 6196) + // Standard Error: 63_180 + .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(m.into())) } /// Storage: `System::Account` (r:1 w:1) @@ -641,8 +674,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 44_992_000 picoseconds. - Weight::from_parts(46_225_000, 3593) + // Minimum execution time: 46_383_000 picoseconds. + Weight::from_parts(47_405_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -654,8 +687,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 28_207_000 picoseconds. - Weight::from_parts(28_707_000, 3550) + // Minimum execution time: 30_994_000 picoseconds. + Weight::from_parts(31_979_000, 3550) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -669,8 +702,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 31_813_000 picoseconds. - Weight::from_parts(32_612_000, 3533) + // Minimum execution time: 37_584_000 picoseconds. + Weight::from_parts(44_010_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -684,10 +717,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `829` + // Measured: `830` // Estimated: `3593` - // Minimum execution time: 38_571_000 picoseconds. - Weight::from_parts(39_493_000, 3593) + // Minimum execution time: 45_266_000 picoseconds. + Weight::from_parts(48_000_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -699,42 +732,53 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `525` // Estimated: `4698` - // Minimum execution time: 24_714_000 picoseconds. - Weight::from_parts(25_288_000, 4698) + // Minimum execution time: 25_365_000 picoseconds. + Weight::from_parts(26_920_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:0 w:1) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_258_000 picoseconds. - Weight::from_parts(7_925_570, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Minimum execution time: 6_519_000 picoseconds. 
+ Weight::from_parts(7_098_698, 0) + // Standard Error: 20 + .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(_n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `97` - // Estimated: `3562` - // Minimum execution time: 7_136_000 picoseconds. - Weight::from_parts(7_788_194, 3562) + fn process_core_count(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `98` + // Estimated: `3563` + // Minimum execution time: 7_608_000 picoseconds. + Weight::from_parts(8_157_815, 3563) + // Standard Error: 26 + .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Broker::InstaPoolHistory` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_049_000 picoseconds. - Weight::from_parts(6_311_000, 0) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Measured: `905` + // Estimated: `4370` + // Minimum execution time: 59_993_000 picoseconds. + Weight::from_parts(61_752_000, 4370) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -751,10 +795,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 47_504_000 picoseconds. - Weight::from_parts(49_778_098, 8499) - // Standard Error: 109 - .saturating_add(Weight::from_parts(427, 0).saturating_mul(n.into())) + // Minimum execution time: 41_863_000 picoseconds. + Weight::from_parts(44_033_031, 8499) + // Standard Error: 116 + .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -766,8 +810,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_573_000 picoseconds. - Weight::from_parts(10_034_000, 3493) + // Minimum execution time: 9_588_000 picoseconds. 
+ Weight::from_parts(9_925_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -779,8 +823,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 21_331_000 picoseconds. - Weight::from_parts(22_235_000, 4681) + // Minimum execution time: 19_308_000 picoseconds. + Weight::from_parts(20_482_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -788,7 +832,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 191_000 picoseconds. - Weight::from_parts(234_000, 0) + // Minimum execution time: 147_000 picoseconds. + Weight::from_parts(184_000, 0) + } + /// Storage: `Broker::Status` (r:1 w:1) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + fn do_tick_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `699` + // Estimated: `4164` + // Minimum execution time: 19_824_000 picoseconds. + Weight::from_parts(20_983_000, 4164) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } }
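
A minimal usage sketch of the `WeightInfo` trait whose implementations are re-benchmarked above; the paths `pallet_broker::weights::WeightInfo` and `sp_weights::Weight`, the budget figure, and the standalone `main` are illustrative assumptions, not part of this patch:

    // Check whether a benchmarked call fits inside a given weight budget.
    // The unit-type impl shown in this diff derives its estimates from
    // `RocksDbWeight`, which makes it convenient for mocks and tests.
    use pallet_broker::weights::WeightInfo;
    use sp_weights::Weight;

    fn fits_in_budget<W: WeightInfo>(budget: Weight) -> bool {
        // `do_tick_base` is one of the calls (re)benchmarked in this patch.
        W::do_tick_base().all_lte(budget)
    }

    fn main() {
        // Hypothetical budget: 0.5 ms of ref_time and 64 KiB of proof size.
        let budget = Weight::from_parts(500_000_000, 64 * 1024);
        println!("do_tick_base fits: {}", fits_in_budget::<()>(budget));
    }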