From ed713ad1ffb9b1da2e3dc13c61c69e176fccb34e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 27 Jan 2022 02:12:32 -0600 Subject: [PATCH 01/42] initial stab at candidate_context --- node/subsystem-util/src/candidate_context.rs | 133 +++++++++++++++++++ node/subsystem-util/src/lib.rs | 2 + node/subsystem-util/src/runtime/mod.rs | 2 + 3 files changed, 137 insertions(+) create mode 100644 node/subsystem-util/src/candidate_context.rs diff --git a/node/subsystem-util/src/candidate_context.rs b/node/subsystem-util/src/candidate_context.rs new file mode 100644 index 000000000000..40d195219b46 --- /dev/null +++ b/node/subsystem-util/src/candidate_context.rs @@ -0,0 +1,133 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// TODO [now]: document everything and make members public. +use std::collections::HashMap; +use polkadot_primitives::v1::Id as ParaId; + +#[derive(Debug, Clone, PartialEq)] +pub struct InboundHrmpChannelContext { + messages_remaining: usize, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct OutboundHrmpChannelContext { + bytes_remaining: usize, + messages_remaining: usize, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct InboundHrmpChannelUpdate { + messages_consumed: usize, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct OutboundHrmpChannelUpdate { + bytes_submitted: usize, + messages_submitted: usize, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct ContextLimitations { + ump_remaining: usize, + ump_rmaining_bytes: usize, + dmp_remaining_messages: usize, + hrmp_channels_in: HashMap, + hrmp_channels_out: HashMap, + // TODO [now]: some session-wide config members like maximums? + // Other expected criteria like the DMP advancement rule? + // TODO [now]: validation code hash & allowed code upgrade. +} + +// TODO [now] +pub struct Error; + +#[derive(Debug, Clone, PartialEq)] +pub struct Context { + base: ContextLimitations, + updates: Vec, + + // base + all updates. + cumulative: ContextLimitations, +} + +impl Context { + pub fn from_base(base: ContextLimitations) -> Self { + Context { + base: base.clone(), + updates: Vec::new(), + cumulative: base, + } + } + + // TODO [now]: add error type + pub fn from_base_and_updates( + base: ContextLimitations, + updates: impl IntoIterator, + ) -> Result { + let mut context = Self::from_base(base); + for update in updates { + context.push(update)?; + } + Ok(context) + } + + pub fn push(&mut self, update: ContextUpdate) -> Result<(), Error> { + unimplemented!() + } + + pub fn limitations(&self) -> &ContextLimitations { + &self.cumulative + } + + pub fn updates(&self) -> &[ContextUpdate] { + &self.updates[..] + } + + pub fn rebase(&self, new_base: ContextLimitations) -> Result { + unimplemented!() + + // TODO [now]. We will want a mode where this just gets as far as it can. + // That could be done in the error type, quite reasonably. 
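		// [Editor's note: illustrative sketch only; not part of this patch. One hedged
		// reading of the TODO above is to express the "get as far as it can" mode in
		// the error type itself, so a failed rebase still hands back the valid prefix:
		//
		//     pub struct RebaseError {
		//         /// The first update that no longer applies against the new base.
		//         pub failed_update: ContextUpdate,
		//         /// A context rebuilt from the updates that did apply cleanly.
		//         pub partial: Context,
		//     }
		//
		// Callers could then either treat the error as fatal or keep `partial`.]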
+ } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct ContextUpdate { + ump_messages_submitted: usize, + ump_bytes_submitted: usize, + dmp_messages_consumed: usize, + hrmp_in: HashMap, + hrmp_out: HashMap, +} + +impl ContextUpdate { + pub fn blank() -> Self { + ContextUpdate { + ump_messages_submitted: 0, + ump_bytes_submitted: 0, + dmp_messages_consumed: 0, + hrmp_in: HashMap::new(), + hrmp_out: HashMap::new(), + } + } +} + +// TODO [now] : function to compare limitations against updates. + +#[cfg(test)] +mod tests { + use super::*; + + // TODO [now]: Pushing, rebasing +} diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index bf120c945f02..c044ac8f1245 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -86,6 +86,8 @@ pub mod reexports { pub mod rolling_session_window; /// Convenient and efficient runtime info access. pub mod runtime; +/// Contexts for interpreting (probable) validity of prospective chains. +pub mod candidate_context; mod determine_new_blocks; diff --git a/node/subsystem-util/src/runtime/mod.rs b/node/subsystem-util/src/runtime/mod.rs index d7afac0b58c2..7031d234705f 100644 --- a/node/subsystem-util/src/runtime/mod.rs +++ b/node/subsystem-util/src/runtime/mod.rs @@ -329,3 +329,5 @@ where recv_runtime(request_validation_code_by_hash(relay_parent, validation_code_hash, sender).await) .await } + +// TODO [now] : a way of getting all [`ContextLimitations`] from runtime. From 300cc3afb4473f8cb25a54fb1bdc5ea962e1ef73 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 27 Jan 2022 02:12:40 -0600 Subject: [PATCH 02/42] fmt --- node/subsystem-util/src/candidate_context.rs | 10 +++------- node/subsystem-util/src/lib.rs | 4 ++-- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/node/subsystem-util/src/candidate_context.rs b/node/subsystem-util/src/candidate_context.rs index 40d195219b46..8f32c8d440d0 100644 --- a/node/subsystem-util/src/candidate_context.rs +++ b/node/subsystem-util/src/candidate_context.rs @@ -12,8 +12,8 @@ // GNU General Public License for more details. // TODO [now]: document everything and make members public. -use std::collections::HashMap; use polkadot_primitives::v1::Id as ParaId; +use std::collections::HashMap; #[derive(Debug, Clone, PartialEq)] pub struct InboundHrmpChannelContext { @@ -63,17 +63,13 @@ pub struct Context { impl Context { pub fn from_base(base: ContextLimitations) -> Self { - Context { - base: base.clone(), - updates: Vec::new(), - cumulative: base, - } + Context { base: base.clone(), updates: Vec::new(), cumulative: base } } // TODO [now]: add error type pub fn from_base_and_updates( base: ContextLimitations, - updates: impl IntoIterator, + updates: impl IntoIterator, ) -> Result { let mut context = Self::from_base(base); for update in updates { diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index c044ac8f1245..e69cd8f0fb74 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -82,12 +82,12 @@ pub mod reexports { pub use polkadot_overseer::gen::{SpawnNamed, SpawnedSubsystem, Subsystem, SubsystemContext}; } +/// Contexts for interpreting (probable) validity of prospective chains. +pub mod candidate_context; /// A rolling session window cache. pub mod rolling_session_window; /// Convenient and efficient runtime info access. pub mod runtime; -/// Contexts for interpreting (probable) validity of prospective chains. 
-pub mod candidate_context; mod determine_new_blocks; From 90816b09ff1f996ca138870f90c9200cecb132ea Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 27 Jan 2022 19:49:10 -0600 Subject: [PATCH 03/42] docs & more TODOs --- node/subsystem-util/src/candidate_context.rs | 75 +++++++++++++++----- 1 file changed, 57 insertions(+), 18 deletions(-) diff --git a/node/subsystem-util/src/candidate_context.rs b/node/subsystem-util/src/candidate_context.rs index 8f32c8d440d0..8d7337e81408 100644 --- a/node/subsystem-util/src/candidate_context.rs +++ b/node/subsystem-util/src/candidate_context.rs @@ -15,35 +15,52 @@ use polkadot_primitives::v1::Id as ParaId; use std::collections::HashMap; +/// Limitations on inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] -pub struct InboundHrmpChannelContext { - messages_remaining: usize, +pub struct InboundHrmpChannelLimitations { + /// The number of messages remaining to be processed. + pub messages_remaining: usize, } +/// Limitations on outbound HRMP channels. #[derive(Debug, Clone, PartialEq)] -pub struct OutboundHrmpChannelContext { - bytes_remaining: usize, - messages_remaining: usize, +pub struct OutboundHrmpChannelLimitations { + /// The maximum bytes that can be written to the channel. + pub bytes_remaining: usize, + /// The maximum messages that can be written to the channel. + pub messages_remaining: usize, } +/// An update to inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] pub struct InboundHrmpChannelUpdate { - messages_consumed: usize, + /// The number of messages consumed from the channel. + pub messages_consumed: usize, } +/// An update to outbound HRMP channels. #[derive(Debug, Clone, PartialEq)] pub struct OutboundHrmpChannelUpdate { - bytes_submitted: usize, - messages_submitted: usize, + /// The number of bytes submitted to the channel. + pub bytes_submitted: usize, + /// The number of messages submitted to the channel. + pub messages_submitted: usize, } +/// Limitations on the actions that can be taken by a new parachain +/// block. #[derive(Debug, Clone, PartialEq)] pub struct ContextLimitations { - ump_remaining: usize, - ump_rmaining_bytes: usize, - dmp_remaining_messages: usize, - hrmp_channels_in: HashMap, - hrmp_channels_out: HashMap, + /// The amount of UMP messages remaining. + pub ump_remaining: usize, + /// The amount of UMP bytes remaining. + pub ump_remaining_bytes: usize, + /// The amount of remaining DMP messages. + pub dmp_remaining_messages: usize, + /// The limitations of all registered inbound HRMP channels. + pub hrmp_channels_in: HashMap, + /// The limitations of all registered outbound HRMP channels. + pub hrmp_channels_out: HashMap, // TODO [now]: some session-wide config members like maximums? // Other expected criteria like the DMP advancement rule? // TODO [now]: validation code hash & allowed code upgrade. @@ -52,6 +69,10 @@ pub struct ContextLimitations { // TODO [now] pub struct Error; +/// A context used for judging parachain candidate validity. +/// +/// This is a combination of base limitations, which come from a +/// base relay-chain state and a series of updates to those limitations. #[derive(Debug, Clone, PartialEq)] pub struct Context { base: ContextLimitations, @@ -62,6 +83,7 @@ pub struct Context { } impl Context { + /// Create a context from a given base. pub fn from_base(base: ContextLimitations) -> Self { Context { base: base.clone(), updates: Vec::new(), cumulative: base } } @@ -78,18 +100,22 @@ impl Context { Ok(context) } + /// Push an update onto a context. 
pub fn push(&mut self, update: ContextUpdate) -> Result<(), Error> { unimplemented!() } + /// Get the limitations associated with this context. pub fn limitations(&self) -> &ContextLimitations { &self.cumulative } + /// Get all updates associated with this context. pub fn updates(&self) -> &[ContextUpdate] { &self.updates[..] } + /// Rebase this context onto a new base. pub fn rebase(&self, new_base: ContextLimitations) -> Result { unimplemented!() @@ -98,16 +124,29 @@ impl Context { } } +// TODO [now]: this needs 2 parts: what we take away from the limitations, +// and what we add to the limitations. +// +// The first is the change from the previous relay-parent to the current state. +// And the second is based on the outputs of the candidate. +/// An update to a context. #[derive(Debug, Clone, PartialEq)] pub struct ContextUpdate { - ump_messages_submitted: usize, - ump_bytes_submitted: usize, - dmp_messages_consumed: usize, - hrmp_in: HashMap, - hrmp_out: HashMap, + // TODO [now] : relay-parent? + /// The number of messages submitted to UMP + pub ump_messages_submitted: usize, + /// The number of message-bytes submitted to UMP + pub ump_bytes_submitted: usize, + /// The number of DMP messages consumed. + pub dmp_messages_consumed: usize, + /// Updates to inbound HRMP channels. + pub hrmp_in: HashMap, + /// Updates to outbound HRMP channels. + pub hrmp_out: HashMap, } impl ContextUpdate { + /// Create a blank context update. pub fn blank() -> Self { ContextUpdate { ump_messages_submitted: 0, From cc47e9c21db708d981c54b0fb76ffb403727715f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Feb 2022 17:05:47 -0600 Subject: [PATCH 04/42] some cleanups --- node/subsystem-util/src/candidate_context.rs | 126 ++++++++++--------- 1 file changed, 64 insertions(+), 62 deletions(-) diff --git a/node/subsystem-util/src/candidate_context.rs b/node/subsystem-util/src/candidate_context.rs index 8d7337e81408..6f1b746bdd10 100644 --- a/node/subsystem-util/src/candidate_context.rs +++ b/node/subsystem-util/src/candidate_context.rs @@ -12,7 +12,10 @@ // GNU General Public License for more details. // TODO [now]: document everything and make members public. -use polkadot_primitives::v1::Id as ParaId; +use polkadot_primitives::v1::{ + BlockNumber, CandidateCommitments, Id as ParaId, Hash, PersistedValidationData, + ValidationCodeHash, HeadData, +}; use std::collections::HashMap; /// Limitations on inbound HRMP channels. @@ -48,7 +51,8 @@ pub struct OutboundHrmpChannelUpdate { } /// Limitations on the actions that can be taken by a new parachain -/// block. +/// block. These limitations are implicitly associated with some particular +/// parachain, which should be apparent from usage. #[derive(Debug, Clone, PartialEq)] pub struct ContextLimitations { /// The amount of UMP messages remaining. @@ -61,48 +65,58 @@ pub struct ContextLimitations { pub hrmp_channels_in: HashMap, /// The limitations of all registered outbound HRMP channels. pub hrmp_channels_out: HashMap, - // TODO [now]: some session-wide config members like maximums? - // Other expected criteria like the DMP advancement rule? - // TODO [now]: validation code hash & allowed code upgrade. + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: usize, + /// The required parent head-data of the parachain. + pub required_parent: HeadData, + /// The expected validation-code-hash of this parachain. + pub validation_code_hash: ValidationCodeHash, + /// Whether the go-ahead signal is set as-of this parachain. 
+ pub go_ahead: bool, // TODO [now] use nice enums like the runtime. + /// Whether a code upgrade is allowed. + pub code_upgrade_allowed: bool, // TODO [now] use nice enums like the runtime } // TODO [now] pub struct Error; +/// Information about a relay-chain block. +#[derive(Debug, Clone, PartialEq)] +pub struct RelayChainBlockInfo { + /// The hash of the relay-chain block. + pub hash: Hash, + /// The number of the relay-chain block. + pub number: BlockNumber, + /// The storage-root of the relay-chain block. + pub storage_root: Hash, +} + /// A context used for judging parachain candidate validity. /// +/// A context is associated with some particular parachain, and this should be +/// apparent from its usage. +/// /// This is a combination of base limitations, which come from a /// base relay-chain state and a series of updates to those limitations. #[derive(Debug, Clone, PartialEq)] pub struct Context { - base: ContextLimitations, - updates: Vec, + base: RelayChainBlockInfo, + base_limitations: ContextLimitations, - // base + all updates. + // base + all extensions. + extensions: Vec, cumulative: ContextLimitations, } impl Context { - /// Create a context from a given base. - pub fn from_base(base: ContextLimitations) -> Self { - Context { base: base.clone(), updates: Vec::new(), cumulative: base } - } - - // TODO [now]: add error type - pub fn from_base_and_updates( - base: ContextLimitations, - updates: impl IntoIterator, - ) -> Result { - let mut context = Self::from_base(base); - for update in updates { - context.push(update)?; + /// Create a context from a given base and base limitations. + pub fn from_base(base: RelayChainBlockInfo, limitations: ContextLimitations) -> Self { + Context { + base, + base_limitations: limitations.clone(), + extensions: Vec::new(), + cumulative: limitations, } - Ok(context) - } - - /// Push an update onto a context. - pub fn push(&mut self, update: ContextUpdate) -> Result<(), Error> { - unimplemented!() } /// Get the limitations associated with this context. @@ -110,13 +124,16 @@ impl Context { &self.cumulative } - /// Get all updates associated with this context. - pub fn updates(&self) -> &[ContextUpdate] { - &self.updates[..] + /// Get all extensions associated with this context. + pub fn extensions(&self) -> &[Extension] { + &self.extensions[..] } /// Rebase this context onto a new base. - pub fn rebase(&self, new_base: ContextLimitations) -> Result { + /// + /// If the `base` is the current `base`, this is a no-op and is guaranteed to succeed. + /// If the `base` is the same as one of the extensions, this succeeds only if the + pub fn rebase(&self, base: RelayChainBlockInfo, new_base: ContextLimitations) -> Result { unimplemented!() // TODO [now]. We will want a mode where this just gets as far as it can. @@ -124,42 +141,27 @@ impl Context { } } -// TODO [now]: this needs 2 parts: what we take away from the limitations, -// and what we add to the limitations. -// -// The first is the change from the previous relay-parent to the current state. -// And the second is based on the outputs of the candidate. -/// An update to a context. +/// An extension to a context, representing another prospective parachain block. +/// +/// This has two parts: the first is the new relay-parent and its associated limitations, +/// and the second is information about the advancement of the parachain. #[derive(Debug, Clone, PartialEq)] -pub struct ContextUpdate { - // TODO [now] : relay-parent? 
- /// The number of messages submitted to UMP - pub ump_messages_submitted: usize, - /// The number of message-bytes submitted to UMP - pub ump_bytes_submitted: usize, - /// The number of DMP messages consumed. - pub dmp_messages_consumed: usize, - /// Updates to inbound HRMP channels. - pub hrmp_in: HashMap, - /// Updates to outbound HRMP channels. - pub hrmp_out: HashMap, +pub struct Extension { + /// The new relay-parent. + pub relay_parent: RelayChainBlockInfo, + /// The limitations associated with this relay-parent. + pub limitations: ContextLimitations, + /// The advancement of the parachain which is part of the extension. + pub advancement: Advancement, } -impl ContextUpdate { - /// Create a blank context update. - pub fn blank() -> Self { - ContextUpdate { - ump_messages_submitted: 0, - ump_bytes_submitted: 0, - dmp_messages_consumed: 0, - hrmp_in: HashMap::new(), - hrmp_out: HashMap::new(), - } - } +#[derive(Debug, Clone, PartialEq)] +pub struct Advancement { + commitments: CandidateCommitments, + // We don't want the candidate descriptor, because that commmits to + // things like the merkle root. } -// TODO [now] : function to compare limitations against updates. - #[cfg(test)] mod tests { use super::*; From 62b622681d01d996087949d66ef372711c0330d1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Feb 2022 17:45:57 -0600 Subject: [PATCH 05/42] reframe as inclusion_emulator --- .../src/inclusion_emulator/mod.rs | 14 ++++ .../staging.rs} | 69 ++++--------------- node/subsystem-util/src/lib.rs | 5 +- 3 files changed, 30 insertions(+), 58 deletions(-) create mode 100644 node/subsystem-util/src/inclusion_emulator/mod.rs rename node/subsystem-util/src/{candidate_context.rs => inclusion_emulator/staging.rs} (69%) diff --git a/node/subsystem-util/src/inclusion_emulator/mod.rs b/node/subsystem-util/src/inclusion_emulator/mod.rs new file mode 100644 index 000000000000..6ab19fa660bd --- /dev/null +++ b/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -0,0 +1,14 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +pub mod staging; diff --git a/node/subsystem-util/src/candidate_context.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs similarity index 69% rename from node/subsystem-util/src/candidate_context.rs rename to node/subsystem-util/src/inclusion_emulator/staging.rs index 6f1b746bdd10..5fc077c81489 100644 --- a/node/subsystem-util/src/candidate_context.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -11,6 +11,11 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +//! The implementation of the inclusion emulator for the 'staging' runtime version. +//! +//! This is currently v1, but will evolve to v3. +// TODO https://github.com/paritytech/polkadot/issues/4803 + // TODO [now]: document everything and make members public. 
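// [Editor's note: illustrative only; not part of this patch. Assuming the crate keeps
// its `polkadot-node-subsystem-util` name, downstream node code would reach the new
// module roughly as:
//
//     use polkadot_node_subsystem_util::inclusion_emulator::staging::Constraints;
//
// with the `staging` path segment left in place so a stabilized module can later sit
// alongside it.]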
use polkadot_primitives::v1::{ BlockNumber, CandidateCommitments, Id as ParaId, Hash, PersistedValidationData, @@ -18,14 +23,14 @@ use polkadot_primitives::v1::{ }; use std::collections::HashMap; -/// Limitations on inbound HRMP channels. +/// Constraints on inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] pub struct InboundHrmpChannelLimitations { /// The number of messages remaining to be processed. pub messages_remaining: usize, } -/// Limitations on outbound HRMP channels. +/// Constraints on outbound HRMP channels. #[derive(Debug, Clone, PartialEq)] pub struct OutboundHrmpChannelLimitations { /// The maximum bytes that can be written to the channel. @@ -50,11 +55,11 @@ pub struct OutboundHrmpChannelUpdate { pub messages_submitted: usize, } -/// Limitations on the actions that can be taken by a new parachain +/// Constraints on the actions that can be taken by a new parachain /// block. These limitations are implicitly associated with some particular /// parachain, which should be apparent from usage. #[derive(Debug, Clone, PartialEq)] -pub struct ContextLimitations { +pub struct Constraints { /// The amount of UMP messages remaining. pub ump_remaining: usize, /// The amount of UMP bytes remaining. @@ -91,56 +96,6 @@ pub struct RelayChainBlockInfo { pub storage_root: Hash, } -/// A context used for judging parachain candidate validity. -/// -/// A context is associated with some particular parachain, and this should be -/// apparent from its usage. -/// -/// This is a combination of base limitations, which come from a -/// base relay-chain state and a series of updates to those limitations. -#[derive(Debug, Clone, PartialEq)] -pub struct Context { - base: RelayChainBlockInfo, - base_limitations: ContextLimitations, - - // base + all extensions. - extensions: Vec, - cumulative: ContextLimitations, -} - -impl Context { - /// Create a context from a given base and base limitations. - pub fn from_base(base: RelayChainBlockInfo, limitations: ContextLimitations) -> Self { - Context { - base, - base_limitations: limitations.clone(), - extensions: Vec::new(), - cumulative: limitations, - } - } - - /// Get the limitations associated with this context. - pub fn limitations(&self) -> &ContextLimitations { - &self.cumulative - } - - /// Get all extensions associated with this context. - pub fn extensions(&self) -> &[Extension] { - &self.extensions[..] - } - - /// Rebase this context onto a new base. - /// - /// If the `base` is the current `base`, this is a no-op and is guaranteed to succeed. - /// If the `base` is the same as one of the extensions, this succeeds only if the - pub fn rebase(&self, base: RelayChainBlockInfo, new_base: ContextLimitations) -> Result { - unimplemented!() - - // TODO [now]. We will want a mode where this just gets as far as it can. - // That could be done in the error type, quite reasonably. - } -} - /// An extension to a context, representing another prospective parachain block. /// /// This has two parts: the first is the new relay-parent and its associated limitations, @@ -150,16 +105,18 @@ pub struct Extension { /// The new relay-parent. pub relay_parent: RelayChainBlockInfo, /// The limitations associated with this relay-parent. - pub limitations: ContextLimitations, + pub limitations: Constraints, /// The advancement of the parachain which is part of the extension. pub advancement: Advancement, } #[derive(Debug, Clone, PartialEq)] pub struct Advancement { - commitments: CandidateCommitments, + /// The commitments to the output of the execution. 
+ pub commitments: CandidateCommitments, // We don't want the candidate descriptor, because that commmits to // things like the merkle root. + // TODO [now]: finalize this definition. } #[cfg(test)] diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index e69cd8f0fb74..adb77a331f55 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -82,8 +82,9 @@ pub mod reexports { pub use polkadot_overseer::gen::{SpawnNamed, SpawnedSubsystem, Subsystem, SubsystemContext}; } -/// Contexts for interpreting (probable) validity of prospective chains. -pub mod candidate_context; +/// An emulator for node-side code to predict the results of on-chain parachain inclusion +/// and predict future constraints. +pub mod inclusion_emulator; /// A rolling session window cache. pub mod rolling_session_window; /// Convenient and efficient runtime info access. From b9f4d40847158ba3908a3bd9c69929d5e4c06b35 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 2 Feb 2022 18:10:52 -0600 Subject: [PATCH 06/42] documentations yes --- .../src/inclusion_emulator/staging.rs | 29 ++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 5fc077c81489..13242d4124f3 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -15,8 +15,29 @@ //! //! This is currently v1, but will evolve to v3. // TODO https://github.com/paritytech/polkadot/issues/4803 +//! +//! A set of utilities for node-side code to emulate the logic the runtime uses for checking +//! parachain blocks in order to build prospective parachains that are produced ahead of the +//! relay chain. These utilities allow the node-side to predict, with high accuracy, what +//! the relay-chain will accept in the near future. +//! +//! This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] exhaustively +//! define the set of valid inputs and outputs to parachain execution. A [`Fragment`] indicates +//! a parachain block, anchored to the relay-chain at a particular relay-chain block, known as the +//! relay-parent. +//! +//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe +//! the properties that must be true for a block to be included in a direct child of that block, +//! assuming there is no intermediate parachain block pending availability. +//! +//! However, the key factor that makes asynchronously-grown prospective chains +//! possible is the fact that the relay-chain accepts candidate blocks based on whether they +//! are valid under the constraints of the present moment, not based on whether they were +//! valid at the time of construction. +//! +//! As such, [`Fragment`]s are often, but not always constructed in such a way that they are +//! invalid at first and become valid later on, as the relay chain grows. -// TODO [now]: document everything and make members public. use polkadot_primitives::v1::{ BlockNumber, CandidateCommitments, Id as ParaId, Hash, PersistedValidationData, ValidationCodeHash, HeadData, @@ -96,16 +117,16 @@ pub struct RelayChainBlockInfo { pub storage_root: Hash, } -/// An extension to a context, representing another prospective parachain block. +/// A parachain fragment, representing another prospective parachain block. 
/// /// This has two parts: the first is the new relay-parent and its associated limitations, /// and the second is information about the advancement of the parachain. #[derive(Debug, Clone, PartialEq)] -pub struct Extension { +pub struct Fragment { /// The new relay-parent. pub relay_parent: RelayChainBlockInfo, /// The limitations associated with this relay-parent. - pub limitations: Constraints, + pub relay_parent_constraints: Constraints, /// The advancement of the parachain which is part of the extension. pub advancement: Advancement, } From 7169066f6ed9183af197b61851964c04a830a05a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 3 Feb 2022 17:48:36 -0600 Subject: [PATCH 07/42] update types --- .../src/inclusion_emulator/staging.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 13242d4124f3..425ee4c40659 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -40,7 +40,7 @@ use polkadot_primitives::v1::{ BlockNumber, CandidateCommitments, Id as ParaId, Hash, PersistedValidationData, - ValidationCodeHash, HeadData, + ValidationCodeHash, HeadData, UpgradeGoAhead, UpgradeRestriction, }; use std::collections::HashMap; @@ -97,15 +97,12 @@ pub struct Constraints { pub required_parent: HeadData, /// The expected validation-code-hash of this parachain. pub validation_code_hash: ValidationCodeHash, - /// Whether the go-ahead signal is set as-of this parachain. - pub go_ahead: bool, // TODO [now] use nice enums like the runtime. - /// Whether a code upgrade is allowed. - pub code_upgrade_allowed: bool, // TODO [now] use nice enums like the runtime + /// The go-ahead signal as-of this parachain. + pub go_ahead: UpgradeGoAhead, + /// The code upgrade restriction signal as-of this parachain. + pub upgrade_restriction: UpgradeRestriction, } -// TODO [now] -pub struct Error; - /// Information about a relay-chain block. #[derive(Debug, Clone, PartialEq)] pub struct RelayChainBlockInfo { From 264214860fe84baf4e28596ec58c5332865d8b33 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 3 Feb 2022 19:43:08 -0600 Subject: [PATCH 08/42] add constraint modifications --- .../src/inclusion_emulator/staging.rs | 76 +++++++++++++------ 1 file changed, 51 insertions(+), 25 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 425ee4c40659..f796582e416a 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -39,12 +39,13 @@ //! invalid at first and become valid later on, as the relay chain grows. use polkadot_primitives::v1::{ - BlockNumber, CandidateCommitments, Id as ParaId, Hash, PersistedValidationData, - ValidationCodeHash, HeadData, UpgradeGoAhead, UpgradeRestriction, + BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, + PersistedValidationData, UpgradeGoAhead, UpgradeRestriction, ValidationCodeHash, }; use std::collections::HashMap; /// Constraints on inbound HRMP channels. +// TODO [now]: reframe inbound HRMP limitations as channels existing + next valid watermark. #[derive(Debug, Clone, PartialEq)] pub struct InboundHrmpChannelLimitations { /// The number of messages remaining to be processed. 
@@ -60,22 +61,6 @@ pub struct OutboundHrmpChannelLimitations { pub messages_remaining: usize, } -/// An update to inbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct InboundHrmpChannelUpdate { - /// The number of messages consumed from the channel. - pub messages_consumed: usize, -} - -/// An update to outbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct OutboundHrmpChannelUpdate { - /// The number of bytes submitted to the channel. - pub bytes_submitted: usize, - /// The number of messages submitted to the channel. - pub messages_submitted: usize, -} - /// Constraints on the actions that can be taken by a new parachain /// block. These limitations are implicitly associated with some particular /// parachain, which should be apparent from usage. @@ -122,19 +107,60 @@ pub struct RelayChainBlockInfo { pub struct Fragment { /// The new relay-parent. pub relay_parent: RelayChainBlockInfo, - /// The limitations associated with this relay-parent. + /// The constraints associated with this relay-parent. pub relay_parent_constraints: Constraints, - /// The advancement of the parachain which is part of the extension. - pub advancement: Advancement, + /// The core information about the prospective candidate. + pub prospective: ProspectiveCandidate, +} + +/// An update to inbound HRMP channels. +#[derive(Debug, Clone, PartialEq)] +pub struct InboundHrmpChannelModification { + /// The number of messages consumed from the channel. + pub messages_consumed: usize, +} + +/// An update to outbound HRMP channels. +#[derive(Debug, Clone, PartialEq)] +pub struct OutboundHrmpChannelModification { + /// The number of bytes submitted to the channel. + pub bytes_submitted: usize, + /// The number of messages submitted to the channel. + pub messages_submitted: usize, +} + +/// Modifications to constraints as a result of prospective candidates. +#[derive(Debug, Clone, PartialEq)] +pub struct ConstraintModifications { + /// The required parent head to build upon. + /// `None` indicates 'unmodified'. + pub required_head: Option, + /// Inbound HRMP channel modifications. + pub inbound_hrmp: HashMap, + /// Outbound HRMP channel modifications. + pub outbound_hrmp: HashMap, + /// The amount of UMP messages sent. + pub ump_messages_sent: usize, + /// The amount of UMP bytes sent. + pub ump_bytes_sent: usize, + /// The amount of DMP messages processed. + pub dmp_messages_processed: usize, + // TODO [now]: figure out how to handle code upgrades. } +/// The prospective candidate. #[derive(Debug, Clone, PartialEq)] -pub struct Advancement { +pub struct ProspectiveCandidate { /// The commitments to the output of the execution. pub commitments: CandidateCommitments, - // We don't want the candidate descriptor, because that commmits to - // things like the merkle root. - // TODO [now]: finalize this definition. + /// The collator that created the candidate. + pub collator: CollatorId, + /// The signature of the collator on the payload. + pub collator_signature: CollatorSignature, + /// The persisted validation data used to create the candidate. + pub persisted_validation_data: PersistedValidationData, + /// The hash of the PoV. 
+ pub pov_hash: Hash, } #[cfg(test)] From b123217371559a3463b99bfc133ef8e1836945e8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 3 Feb 2022 19:53:20 -0600 Subject: [PATCH 09/42] watermark --- .../src/inclusion_emulator/staging.rs | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index f796582e416a..85432bf03112 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -45,11 +45,10 @@ use polkadot_primitives::v1::{ use std::collections::HashMap; /// Constraints on inbound HRMP channels. -// TODO [now]: reframe inbound HRMP limitations as channels existing + next valid watermark. #[derive(Debug, Clone, PartialEq)] -pub struct InboundHrmpChannelLimitations { - /// The number of messages remaining to be processed. - pub messages_remaining: usize, +pub struct InboundHrmpLimitations { + /// An exhaustive set of all valid watermarks. + pub valid_watermarks: Vec, } /// Constraints on outbound HRMP channels. @@ -73,7 +72,7 @@ pub struct Constraints { /// The amount of remaining DMP messages. pub dmp_remaining_messages: usize, /// The limitations of all registered inbound HRMP channels. - pub hrmp_channels_in: HashMap, + pub hrmp_inbound: InboundHrmpLimitations, /// The limitations of all registered outbound HRMP channels. pub hrmp_channels_out: HashMap, /// The maximum Proof-of-Validity size allowed, in bytes. @@ -113,13 +112,6 @@ pub struct Fragment { pub prospective: ProspectiveCandidate, } -/// An update to inbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] -pub struct InboundHrmpChannelModification { - /// The number of messages consumed from the channel. - pub messages_consumed: usize, -} - /// An update to outbound HRMP channels. #[derive(Debug, Clone, PartialEq)] pub struct OutboundHrmpChannelModification { @@ -135,8 +127,8 @@ pub struct ConstraintModifications { /// The required parent head to build upon. /// `None` indicates 'unmodified'. pub required_head: Option, - /// Inbound HRMP channel modifications. - pub inbound_hrmp: HashMap, + /// The new HRMP watermark + pub hrmp_watermark: BlockNumber, /// Outbound HRMP channel modifications. pub outbound_hrmp: HashMap, /// The amount of UMP messages sent. @@ -146,6 +138,9 @@ pub struct ConstraintModifications { /// The amount of DMP messages processed. pub dmp_messages_processed: usize, // TODO [now]: figure out how to handle code upgrades. + // In the block after a go-ahead signal, we know that the code of the + // parachain updated. We will need to scrape this from the relay-chain state. + // We'd need to scrape that from the relay-chain state. } /// The prospective candidate. @@ -161,6 +156,8 @@ pub struct ProspectiveCandidate { pub persisted_validation_data: PersistedValidationData, /// The hash of the PoV. pub pov_hash: Hash, + /// The validation code hash used by the candidate. 
+ pub validation_code_hash: ValidationCodeHash, } #[cfg(test)] From 2b43d0b3534335218401ab5fdd127c59e0d22ce8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 4 Feb 2022 17:23:08 -0600 Subject: [PATCH 10/42] produce modifications --- .../src/inclusion_emulator/staging.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 85432bf03112..af9aee404b6a 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -160,6 +160,25 @@ pub struct ProspectiveCandidate { pub validation_code_hash: ValidationCodeHash, } +impl ProspectiveCandidate { + /// Produce a set of constraint modifications based on the outputs + /// of the candidate. + pub fn constraint_modifications(&self) -> ConstraintModifications { + ConstraintModifications { + required_head: Some(self.commitments.head_data.clone()), + hrmp_watermark: self.commitments.hrmp_watermark, + outbound_hrmp: { + // TODO [now]: have we enforced that HRMP messages are ascending at this point? + // probably better not to assume that and do sanity-checking at other points. + unimplemented!() + }, + ump_messages_sent: self.commitments.upward_messages.len(), + ump_bytes_sent: self.commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + dmp_messages_processed: self.commitments.processed_downward_messages as _, + } + } +} + #[cfg(test)] mod tests { use super::*; From 8c0c3a3fad171268d3650324ecf4f90e617678d8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Feb 2022 18:53:44 -0600 Subject: [PATCH 11/42] v2 primitives: re-export all v1 for consistency --- primitives/src/v2/mod.rs | 78 ++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index 065d5cc3c057..fe3aef940d5d 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -16,8 +16,6 @@ //! `V2` Primitives. -use crate::v1; - use parity_scale_codec::{Decode, Encode}; use primitives::RuntimeDebug; use scale_info::TypeInfo; @@ -26,6 +24,8 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; #[cfg(feature = "std")] use parity_util_mem::MallocSizeOf; +pub use crate::v1::*; + /// Information about validator sets of a session. #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq, MallocSizeOf))] @@ -33,11 +33,11 @@ pub struct SessionInfo { /****** New in v2 *******/ /// All the validators actively participating in parachain consensus. /// Indices are into the broader validator set. - pub active_validator_indices: Vec, + pub active_validator_indices: Vec, /// A secure random seed for the session, gathered from BABE. pub random_seed: [u8; 32], /// The amount of sessions to keep for disputes. - pub dispute_period: v1::SessionIndex, + pub dispute_period: SessionIndex, /****** Old fields ******/ /// Validators in canonical ordering. @@ -47,7 +47,7 @@ pub struct SessionInfo { /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148). /// /// `SessionInfo::validators` will be limited to to `max_validators` when set. - pub validators: Vec, + pub validators: Vec, /// Validators' authority discovery keys for the session in canonical ordering. 
/// /// NOTE: The first `validators.len()` entries will match the corresponding validators in @@ -55,7 +55,7 @@ pub struct SessionInfo { /// participating in parachain consensus - see /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148) #[cfg_attr(feature = "std", ignore_malloc_size_of = "outside type")] - pub discovery_keys: Vec, + pub discovery_keys: Vec, /// The assignment keys for validators. /// /// NOTE: There might be more authorities in the current session, than validators participating @@ -66,11 +66,11 @@ pub struct SessionInfo { /// ```ignore /// assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len() /// ``` - pub assignment_keys: Vec, + pub assignment_keys: Vec, /// Validators in shuffled ordering - these are the validator groups as produced /// by the `Scheduler` module for the session and are typically referred to by /// `GroupIndex`. - pub validator_groups: Vec>, + pub validator_groups: Vec>, /// The number of availability cores used by the protocol during this session. pub n_cores: u32, /// The zeroth delay tranche width. @@ -86,8 +86,8 @@ pub struct SessionInfo { pub needed_approvals: u32, } -impl From for SessionInfo { - fn from(old: v1::SessionInfo) -> SessionInfo { +impl From for SessionInfo { + fn from(old: crate::v1::SessionInfo) -> SessionInfo { SessionInfo { // new fields active_validator_indices: Vec::new(), @@ -115,11 +115,11 @@ pub struct PvfCheckStatement { /// `true` if the subject passed pre-checking and `false` otherwise. pub accept: bool, /// The validation code hash that was checked. - pub subject: v1::ValidationCodeHash, + pub subject: ValidationCodeHash, /// The index of a session during which this statement is considered valid. - pub session_index: v1::SessionIndex, + pub session_index: SessionIndex, /// The index of the validator from which this statement originates. - pub validator_index: v1::ValidatorIndex, + pub validator_index: ValidatorIndex, } impl PvfCheckStatement { @@ -136,97 +136,97 @@ impl PvfCheckStatement { sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. #[api_version(2)] - pub trait ParachainHost { + pub trait ParachainHost { /// Get the current validators. - fn validators() -> Vec; + fn validators() -> Vec; /// Returns the validator groups and rotation info localized based on the hypothetical child /// of a block whose state this is invoked on. Note that `now` in the `GroupRotationInfo` /// should be the successor of the number of the block. - fn validator_groups() -> (Vec>, v1::GroupRotationInfo); + fn validator_groups() -> (Vec>, GroupRotationInfo); /// Yields information on all availability cores as relevant to the child block. /// Cores are either free or occupied. Free cores can have paras assigned to them. - fn availability_cores() -> Vec>; + fn availability_cores() -> Vec>; /// Yields the persisted validation data for the given `ParaId` along with an assumption that /// should be used if the para currently occupies a core. /// /// Returns `None` if either the para is not registered or the assumption is `Freed` /// and the para already occupies a core. - fn persisted_validation_data(para_id: v1::Id, assumption: v1::OccupiedCoreAssumption) - -> Option>; + fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option>; /// Returns the persisted validation data for the given `ParaId` along with the corresponding /// validation code hash. 
Instead of accepting assumption about the para, matches the validation /// data hash against an expected one and yields `None` if they're not equal. fn assumed_validation_data( - para_id: v1::Id, - expected_persisted_validation_data_hash: v1::Hash, - ) -> Option<(v1::PersistedValidationData, v1::ValidationCodeHash)>; + para_id: Id, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)>; /// Checks if the given validation outputs pass the acceptance criteria. - fn check_validation_outputs(para_id: v1::Id, outputs: v1::CandidateCommitments) -> bool; + fn check_validation_outputs(para_id: Id, outputs: CandidateCommitments) -> bool; /// Returns the session index expected at a child of the block. /// /// This can be used to instantiate a `SigningContext`. - fn session_index_for_child() -> v1::SessionIndex; + fn session_index_for_child() -> SessionIndex; /// Old method to fetch v1 session info. #[changed_in(2)] - fn session_info(index: v1::SessionIndex) -> Option; + fn session_info(index: SessionIndex) -> Option; /// Fetch the validation code used by a para, making the given `OccupiedCoreAssumption`. /// /// Returns `None` if either the para is not registered or the assumption is `Freed` /// and the para already occupies a core. - fn validation_code(para_id: v1::Id, assumption: v1::OccupiedCoreAssumption) - -> Option; + fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option; /// Get the receipt of a candidate pending availability. This returns `Some` for any paras /// assigned to occupied cores in `availability_cores` and `None` otherwise. - fn candidate_pending_availability(para_id: v1::Id) -> Option>; + fn candidate_pending_availability(para_id: Id) -> Option>; /// Get a vector of events concerning candidates that occurred within a block. - fn candidate_events() -> Vec>; + fn candidate_events() -> Vec>; /// Get all the pending inbound messages in the downward message queue for a para. fn dmq_contents( - recipient: v1::Id, - ) -> Vec>; + recipient: Id, + ) -> Vec>; /// Get the contents of all channels addressed to the given recipient. Channels that have no /// messages in them are also included. - fn inbound_hrmp_channels_contents(recipient: v1::Id) -> BTreeMap>>; + fn inbound_hrmp_channels_contents(recipient: Id) -> BTreeMap>>; /// Get the validation code from its hash. - fn validation_code_by_hash(hash: v1::ValidationCodeHash) -> Option; + fn validation_code_by_hash(hash: ValidationCodeHash) -> Option; /// Scrape dispute relevant from on-chain, backing votes and resolved disputes. - fn on_chain_votes() -> Option>; + fn on_chain_votes() -> Option>; /***** Added in v2 *****/ /// Get the session info for the given session, if stored. /// /// NOTE: This function is only available since parachain host version 2. - fn session_info(index: v1::SessionIndex) -> Option; + fn session_info(index: SessionIndex) -> Option; /// Submits a PVF pre-checking statement into the transaction pool. /// /// NOTE: This function is only available since parachain host version 2. - fn submit_pvf_check_statement(stmt: PvfCheckStatement, signature: v1::ValidatorSignature); + fn submit_pvf_check_statement(stmt: PvfCheckStatement, signature: ValidatorSignature); /// Returns code hashes of PVFs that require pre-checking by validators in the active set. /// /// NOTE: This function is only available since parachain host version 2. 
- fn pvfs_require_precheck() -> Vec; + fn pvfs_require_precheck() -> Vec; /// Fetch the hash of the validation code used by a para, making the given `OccupiedCoreAssumption`. /// /// NOTE: This function is only available since parachain host version 2. - fn validation_code_hash(para_id: v1::Id, assumption: v1::OccupiedCoreAssumption) - -> Option; + fn validation_code_hash(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option; } } From cf8c17cbc586fed2fd59215b387f67d34d717923 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Feb 2022 19:01:43 -0600 Subject: [PATCH 12/42] vstaging primitives --- primitives/src/lib.rs | 13 +++++++++++++ primitives/src/vstaging/mod.rs | 31 +++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 primitives/src/vstaging/mod.rs diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index febcb175d0c9..dbff50917237 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -19,6 +19,19 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +/// The minimum supported version of the primitives by this implementation. +pub const MIN_SUPPORTED_VERSION: u32 = 1; +/// The maximum supported version of the primitives by this implementation. +pub const MAX_SUPPORTED_VERSION: u32 = 2; + +/// The STAGING version. +pub const STAGING_VERSION: u32 = u32::MAX; + pub mod v0; pub mod v1; pub mod v2; + +// The 'staging' version is special - while other versions are set in stone, +// the staging version is malleable. Once it's released, it gets the next +// version number. +pub mod vstaging; diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs new file mode 100644 index 000000000000..8e17b2c00f9b --- /dev/null +++ b/primitives/src/vstaging/mod.rs @@ -0,0 +1,31 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Staging Primitives. + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; +use sp_std::prelude::*; + +pub use crate::v2::*; + +sp_api::decl_runtime_apis! { + /// The API for querying the state of parachains on-chain. + // In the staging API, this is u32::MAX. + #[api_version(4294967295)] + pub trait ParachainHost {} +} From 51cdc50fe5b51b134457066df790cba20e23296b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Feb 2022 20:29:42 -0600 Subject: [PATCH 13/42] emulator constraints: handle code upgrades --- .../src/inclusion_emulator/staging.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index af9aee404b6a..7edbc6b278a7 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -13,7 +13,7 @@ //! 
The implementation of the inclusion emulator for the 'staging' runtime version. //! -//! This is currently v1, but will evolve to v3. +//! This is currently v1 (v2?), but will evolve to v3. // TODO https://github.com/paritytech/polkadot/issues/4803 //! //! A set of utilities for node-side code to emulate the logic the runtime uses for checking @@ -38,7 +38,7 @@ //! As such, [`Fragment`]s are often, but not always constructed in such a way that they are //! invalid at first and become valid later on, as the relay chain grows. -use polkadot_primitives::v1::{ +use polkadot_primitives::v2::{ BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeGoAhead, UpgradeRestriction, ValidationCodeHash, }; @@ -77,6 +77,8 @@ pub struct Constraints { pub hrmp_channels_out: HashMap, /// The maximum Proof-of-Validity size allowed, in bytes. pub max_pov_size: usize, + /// The maximum number of HRMP messages allowed per candidate. + pub max_hrmp_num_per_candidate: usize, /// The required parent head-data of the parachain. pub required_parent: HeadData, /// The expected validation-code-hash of this parachain. @@ -85,6 +87,9 @@ pub struct Constraints { pub go_ahead: UpgradeGoAhead, /// The code upgrade restriction signal as-of this parachain. pub upgrade_restriction: UpgradeRestriction, + /// The future validation code hash, if any, and at what relay-parent + /// number the upgrade would be minimally applied. + pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, } /// Information about a relay-chain block. @@ -137,10 +142,8 @@ pub struct ConstraintModifications { pub ump_bytes_sent: usize, /// The amount of DMP messages processed. pub dmp_messages_processed: usize, - // TODO [now]: figure out how to handle code upgrades. - // In the block after a go-ahead signal, we know that the code of the - // parachain updated. We will need to scrape this from the relay-chain state. - // We'd need to scrape that from the relay-chain state. + /// Whether a scheduled code upgrade was applied. + pub code_upgrade_applied: usize, } /// The prospective candidate. From acadae5a71602fca6849bda7ae44c0d27ead021d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Feb 2022 20:35:19 -0600 Subject: [PATCH 14/42] produce outbound HRMP modifications --- .../src/inclusion_emulator/staging.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 7edbc6b278a7..ef34f39d4298 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -142,8 +142,6 @@ pub struct ConstraintModifications { pub ump_bytes_sent: usize, /// The amount of DMP messages processed. pub dmp_messages_processed: usize, - /// Whether a scheduled code upgrade was applied. - pub code_upgrade_applied: usize, } /// The prospective candidate. @@ -161,6 +159,8 @@ pub struct ProspectiveCandidate { pub pov_hash: Hash, /// The validation code hash used by the candidate. pub validation_code_hash: ValidationCodeHash, + // TODO [now]: do code upgrades go here? if so, we can't produce + // modifications just from a candidate. 
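	// [Editor's note: illustrative aside, not part of this patch. One hedged answer to
	// the TODO above: `CandidateCommitments` already carries
	// `new_validation_code: Option<ValidationCode>`, so whether an upgrade is signalled
	// can be read off the commitments, e.g.
	//
	//     let upgrade_signalled = self.commitments.new_validation_code.is_some();
	//
	// while whether a previously scheduled upgrade actually applies would come from the
	// relay-parent's `Constraints::future_validation_code` and go-ahead signal rather
	// than from the candidate alone, which is the gap the TODO points at.]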
} impl ProspectiveCandidate { @@ -171,9 +171,18 @@ impl ProspectiveCandidate { required_head: Some(self.commitments.head_data.clone()), hrmp_watermark: self.commitments.hrmp_watermark, outbound_hrmp: { - // TODO [now]: have we enforced that HRMP messages are ascending at this point? - // probably better not to assume that and do sanity-checking at other points. - unimplemented!() + let mut outbound_hrmp = HashMap::new(); + for message in &self.commitments.horizontal_messages { + let record = outbound_hrmp.entry(message.recipient.clone()).or_insert(OutboundHrmpChannelModification { + bytes_submitted: 0, + messages_submitted: 0, + }); + + record.bytes_submitted += message.data.len(); + record.messages_submitted += 1; + } + + outbound_hrmp }, ump_messages_sent: self.commitments.upward_messages.len(), ump_bytes_sent: self.commitments.upward_messages.iter().map(|msg| msg.len()).sum(), From 754619e25a3039cd95286506cfd4aa4f030b210c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 7 Feb 2022 20:45:26 -0600 Subject: [PATCH 15/42] stack. --- .../src/inclusion_emulator/staging.rs | 34 ++++++++++++++----- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index ef34f39d4298..d5d517d574da 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -118,7 +118,7 @@ pub struct Fragment { } /// An update to outbound HRMP channels. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Default)] pub struct OutboundHrmpChannelModification { /// The number of bytes submitted to the channel. pub bytes_submitted: usize, @@ -131,7 +131,7 @@ pub struct OutboundHrmpChannelModification { pub struct ConstraintModifications { /// The required parent head to build upon. /// `None` indicates 'unmodified'. - pub required_head: Option, + pub required_head: HeadData, /// The new HRMP watermark pub hrmp_watermark: BlockNumber, /// Outbound HRMP channel modifications. @@ -144,6 +144,27 @@ pub struct ConstraintModifications { pub dmp_messages_processed: usize, } +impl ConstraintModifications { + /// Stack other modifications on top of these. + /// + /// This does no sanity-checking, so if `other` is garbage relative + /// to `self`, then the new value will be garbage as well. + pub fn stack(&mut self, other: &Self) { + self.required_head = other.required_head.clone(); + self.hrmp_watermark = other.hrmp_watermark; + + for (id, mods) in &other.outbound_hrmp { + let record = self.outbound_hrmp.entry(id.clone()).or_default(); + record.messages_submitted += mods.messages_submitted; + record.bytes_submitted += mods.bytes_submitted; + } + + self.ump_messages_sent += other.ump_messages_sent; + self.ump_bytes_sent += other.ump_bytes_sent; + self.dmp_messages_processed += other.dmp_messages_processed; + } +} + /// The prospective candidate. #[derive(Debug, Clone, PartialEq)] pub struct ProspectiveCandidate { @@ -168,15 +189,12 @@ impl ProspectiveCandidate { /// of the candidate. 
pub fn constraint_modifications(&self) -> ConstraintModifications { ConstraintModifications { - required_head: Some(self.commitments.head_data.clone()), + required_head: self.commitments.head_data.clone(), hrmp_watermark: self.commitments.hrmp_watermark, outbound_hrmp: { - let mut outbound_hrmp = HashMap::new(); + let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); for message in &self.commitments.horizontal_messages { - let record = outbound_hrmp.entry(message.recipient.clone()).or_insert(OutboundHrmpChannelModification { - bytes_submitted: 0, - messages_submitted: 0, - }); + let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); record.bytes_submitted += message.data.len(); record.messages_submitted += 1; From 5229f6bcea2a2333325b0681d26267153d7434af Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 21:25:11 -0600 Subject: [PATCH 16/42] method for applying modifications --- .../src/inclusion_emulator/staging.rs | 135 +++++++++++++++++- 1 file changed, 130 insertions(+), 5 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index d5d517d574da..b31dea1ef25e 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -47,7 +47,7 @@ use std::collections::HashMap; /// Constraints on inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] pub struct InboundHrmpLimitations { - /// An exhaustive set of all valid watermarks. + /// An exhaustive set of all valid watermarks, sorted ascending pub valid_watermarks: Vec, } @@ -92,6 +92,129 @@ pub struct Constraints { pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, } +/// Kinds of errors that can occur when modifying constraints. +#[derive(Debug, Clone, PartialEq)] +pub enum ModificationError { + /// The HRMP watermark is not allowed. + DisallowedHrmpWatermark(BlockNumber), + /// No such HRMP outbound channel. + NoSuchHrmpChannel(ParaId), + /// Too many messages submitted to HRMP channel. + HrmpMessagesOverflow { + /// The ID of the recipient. + para_id: ParaId, + /// The amount of remaining messages in the capacity of the channel. + messages_remaining: usize, + /// The amount of messages submitted to the channel. + messages_submitted: usize, + }, + /// Too many bytes submitted to HRMP channel. + HrmpBytesOverflow { + /// The ID of the recipient. + para_id: ParaId, + /// The amount of remaining bytes in the capacity of the channel. + bytes_remaining: usize, + /// The amount of bytes submitted to the channel. + bytes_submitted: usize, + }, + /// Too many messages submitted to UMP. + UmpMessagesOverflow { + /// The amount of remaining messages in the capacity of UMP. + messages_remaining: usize, + /// The amount of messages submitted to UMP. + messages_submitted: usize, + }, + /// Too many bytes submitted to UMP. + UmpBytesOverflow { + /// The amount of remaining bytes in the capacity of UMP. + bytes_remaining: usize, + /// The amount of bytes submitted to UMP. + bytes_submitted: usize, + }, + /// Too many messages processed from DMP. + DmpMessagesUnderflow { + /// The amount of messages waiting to be processed from DMP. + messages_remaining: usize, + /// The amount of messages processed. + messages_processed: usize, + }, +} + +impl Constraints { + /// Apply modifications to these constraints. If this succeeds, it passes + /// all sanity-checks. 
+ pub fn apply_modifications( + &self, + modifications: ConstraintModifications, + ) -> Result { + let mut new = self.clone(); + + match new + .hrmp_inbound + .valid_watermarks + .iter() + .position(|w| w == &modifications.hrmp_watermark) + { + Some(pos) => { + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); + }, + None => + return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)), + } + + new.required_parent = modifications.required_parent; + + for (id, outbound_hrmp_mod) in modifications.outbound_hrmp { + if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { + outbound.bytes_remaining = outbound + .bytes_remaining + .checked_sub(outbound_hrmp_mod.bytes_submitted) + .ok_or(ModificationError::HrmpBytesOverflow { + para_id: id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + })?; + + outbound.messages_remaining = outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(id)) + } + } + + new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: new.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + new.ump_remaining_bytes = new + .ump_remaining_bytes + .checked_sub(modifications.ump_bytes_sent) + .ok_or(ModificationError::UmpBytesOverflow { + bytes_remaining: new.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + })?; + + new.dmp_remaining_messages = new + .dmp_remaining_messages + .checked_sub(modifications.dmp_messages_processed) + .ok_or(ModificationError::DmpMessagesUnderflow { + messages_remaining: new.dmp_remaining_messages, + messages_processed: modifications.dmp_messages_processed, + })?; + + Ok(new) + } +} + /// Information about a relay-chain block. #[derive(Debug, Clone, PartialEq)] pub struct RelayChainBlockInfo { @@ -131,7 +254,7 @@ pub struct OutboundHrmpChannelModification { pub struct ConstraintModifications { /// The required parent head to build upon. /// `None` indicates 'unmodified'. - pub required_head: HeadData, + pub required_parent: HeadData, /// The new HRMP watermark pub hrmp_watermark: BlockNumber, /// Outbound HRMP channel modifications. @@ -150,7 +273,7 @@ impl ConstraintModifications { /// This does no sanity-checking, so if `other` is garbage relative /// to `self`, then the new value will be garbage as well. pub fn stack(&mut self, other: &Self) { - self.required_head = other.required_head.clone(); + self.required_parent = other.required_parent.clone(); self.hrmp_watermark = other.hrmp_watermark; for (id, mods) in &other.outbound_hrmp { @@ -189,7 +312,7 @@ impl ProspectiveCandidate { /// of the candidate. pub fn constraint_modifications(&self) -> ConstraintModifications { ConstraintModifications { - required_head: self.commitments.head_data.clone(), + required_parent: self.commitments.head_data.clone(), hrmp_watermark: self.commitments.hrmp_watermark, outbound_hrmp: { let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); @@ -213,5 +336,7 @@ impl ProspectiveCandidate { mod tests { use super::*; - // TODO [now]: Pushing, rebasing + // TODO [now] Stacking modifications + + // TODO [now] checking outputs against constraints. 
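// Illustrative usage sketch, assuming the API as it stands at this point in the
// series (`apply_modifications` takes the modifications by value and returns the
// follow-up constraints): walking a chain of candidates threads the constraints
// through them, each candidate's outputs producing the constraints the next
// candidate must satisfy. `base` and `chain` are assumed inputs.

fn constraints_at_tip(
	base: Constraints,
	chain: &[ProspectiveCandidate],
) -> Result<Constraints, ModificationError> {
	let mut acc = base;
	for candidate in chain {
		acc = acc.apply_modifications(candidate.constraint_modifications())?;
	}
	Ok(acc)
}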
} From edd2c4605e20005e2964c61fb3e7f3a5a967d528 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 21:35:33 -0600 Subject: [PATCH 17/42] method just for sanity-checking modifications --- .../src/inclusion_emulator/staging.rs | 78 +++++++++++++++++-- 1 file changed, 71 insertions(+), 7 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index b31dea1ef25e..6f9b2dab75bd 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -141,14 +141,80 @@ pub enum ModificationError { } impl Constraints { + /// Check modifications against constraints. + pub fn check_modifications( + &self, + modifications: &ConstraintModifications, + ) -> Result<(), ModificationError> { + if self + .hrmp_inbound + .valid_watermarks + .iter() + .position(|w| w == &modifications.hrmp_watermark) + .is_none() + { + return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)); + } + + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { + if let Some(outbound) = self.hrmp_channels_out.get(&id) { + outbound + .bytes_remaining + .checked_sub(outbound_hrmp_mod.bytes_submitted) + .ok_or(ModificationError::HrmpBytesOverflow { + para_id: *id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + })?; + + outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: *id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(*id)) + } + } + + self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: self.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + self + .ump_remaining_bytes + .checked_sub(modifications.ump_bytes_sent) + .ok_or(ModificationError::UmpBytesOverflow { + bytes_remaining: self.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + })?; + + self + .dmp_remaining_messages + .checked_sub(modifications.dmp_messages_processed) + .ok_or(ModificationError::DmpMessagesUnderflow { + messages_remaining: self.dmp_remaining_messages, + messages_processed: modifications.dmp_messages_processed, + })?; + + Ok(()) + } + /// Apply modifications to these constraints. If this succeeds, it passes /// all sanity-checks. 
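// Illustrative sketch: `check_modifications` is intended as the cheap,
// non-mutating pre-flight, so a subsystem can vet a candidate's outputs before
// deciding whether it is worth building the follow-up constraints with
// `apply_modifications`. `constraints` and `mods` are assumed inputs.

fn outputs_acceptable(constraints: &Constraints, mods: &ConstraintModifications) -> bool {
	constraints.check_modifications(mods).is_ok()
}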
pub fn apply_modifications( &self, - modifications: ConstraintModifications, + modifications: &ConstraintModifications, ) -> Result { let mut new = self.clone(); + new.required_parent = modifications.required_parent.clone(); match new .hrmp_inbound .valid_watermarks @@ -162,15 +228,13 @@ impl Constraints { return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)), } - new.required_parent = modifications.required_parent; - - for (id, outbound_hrmp_mod) in modifications.outbound_hrmp { + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { outbound.bytes_remaining = outbound .bytes_remaining .checked_sub(outbound_hrmp_mod.bytes_submitted) .ok_or(ModificationError::HrmpBytesOverflow { - para_id: id, + para_id: *id, bytes_remaining: outbound.bytes_remaining, bytes_submitted: outbound_hrmp_mod.bytes_submitted, })?; @@ -179,12 +243,12 @@ impl Constraints { .messages_remaining .checked_sub(outbound_hrmp_mod.messages_submitted) .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: id, + para_id: *id, messages_remaining: outbound.messages_remaining, messages_submitted: outbound_hrmp_mod.messages_submitted, })?; } else { - return Err(ModificationError::NoSuchHrmpChannel(id)) + return Err(ModificationError::NoSuchHrmpChannel(*id)) } } From ce5c54f2f09f2544828e4e59476c422644f6fbd0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 21:50:29 -0600 Subject: [PATCH 18/42] fragments produce modifications, not prospectives --- .../src/inclusion_emulator/staging.rs | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 6f9b2dab75bd..20d0ad70f1ae 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -153,19 +153,18 @@ impl Constraints { .position(|w| w == &modifications.hrmp_watermark) .is_none() { - return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)); + return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)) } for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { if let Some(outbound) = self.hrmp_channels_out.get(&id) { - outbound - .bytes_remaining - .checked_sub(outbound_hrmp_mod.bytes_submitted) - .ok_or(ModificationError::HrmpBytesOverflow { + outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( + ModificationError::HrmpBytesOverflow { para_id: *id, bytes_remaining: outbound.bytes_remaining, bytes_submitted: outbound_hrmp_mod.bytes_submitted, - })?; + }, + )?; outbound .messages_remaining @@ -187,16 +186,14 @@ impl Constraints { }, )?; - self - .ump_remaining_bytes - .checked_sub(modifications.ump_bytes_sent) - .ok_or(ModificationError::UmpBytesOverflow { + self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( + ModificationError::UmpBytesOverflow { bytes_remaining: self.ump_remaining_bytes, bytes_submitted: modifications.ump_bytes_sent, - })?; + }, + )?; - self - .dmp_remaining_messages + self.dmp_remaining_messages .checked_sub(modifications.dmp_messages_processed) .ok_or(ModificationError::DmpMessagesUnderflow { messages_remaining: self.dmp_remaining_messages, @@ -298,10 +295,37 @@ pub struct RelayChainBlockInfo { pub struct Fragment { /// The new relay-parent. 
pub relay_parent: RelayChainBlockInfo, - /// The constraints associated with this relay-parent. - pub relay_parent_constraints: Constraints, + /// The constraints this fragment is operating under. + pub operating_constraints: Constraints, /// The core information about the prospective candidate. - pub prospective: ProspectiveCandidate, + pub candidate: ProspectiveCandidate, +} + +impl Fragment { + /// Produce a set of constraint modifications based on the outputs + /// of the candidate. + pub fn constraint_modifications(&self) -> ConstraintModifications { + let commitments = &self.candidate.commitments; + + ConstraintModifications { + required_parent: commitments.head_data.clone(), + hrmp_watermark: commitments.hrmp_watermark, + outbound_hrmp: { + let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); + for message in &commitments.horizontal_messages { + let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); + + record.bytes_submitted += message.data.len(); + record.messages_submitted += 1; + } + + outbound_hrmp + }, + ump_messages_sent: commitments.upward_messages.len(), + ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + dmp_messages_processed: commitments.processed_downward_messages as _, + } + } } /// An update to outbound HRMP channels. @@ -329,6 +353,7 @@ pub struct ConstraintModifications { pub ump_bytes_sent: usize, /// The amount of DMP messages processed. pub dmp_messages_processed: usize, + // TODO [now]: code upgrade application. } impl ConstraintModifications { @@ -371,31 +396,6 @@ pub struct ProspectiveCandidate { // modifications just from a candidate. } -impl ProspectiveCandidate { - /// Produce a set of constraint modifications based on the outputs - /// of the candidate. - pub fn constraint_modifications(&self) -> ConstraintModifications { - ConstraintModifications { - required_parent: self.commitments.head_data.clone(), - hrmp_watermark: self.commitments.hrmp_watermark, - outbound_hrmp: { - let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - for message in &self.commitments.horizontal_messages { - let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); - - record.bytes_submitted += message.data.len(); - record.messages_submitted += 1; - } - - outbound_hrmp - }, - ump_messages_sent: self.commitments.upward_messages.len(), - ump_bytes_sent: self.commitments.upward_messages.iter().map(|msg| msg.len()).sum(), - dmp_messages_processed: self.commitments.processed_downward_messages as _, - } - } -} - #[cfg(test)] mod tests { use super::*; From 170fe9365f8700bf31fd6fc22242125d187eeea1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 21:50:59 -0600 Subject: [PATCH 19/42] make linear --- .../src/inclusion_emulator/staging.rs | 82 +++++++++---------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 20d0ad70f1ae..0412ce750af1 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -287,47 +287,6 @@ pub struct RelayChainBlockInfo { pub storage_root: Hash, } -/// A parachain fragment, representing another prospective parachain block. -/// -/// This has two parts: the first is the new relay-parent and its associated limitations, -/// and the second is information about the advancement of the parachain. 
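// Illustrative sketch of how fragments are expected to chain at this point
// (public `Fragment` fields, `apply_modifications` taking a reference): the
// operating constraints of a child fragment are the parent's operating
// constraints with the parent's output modifications applied. Not part of the
// patch.

fn child_operating_constraints(parent: &Fragment) -> Result<Constraints, ModificationError> {
	let mods = parent.constraint_modifications();
	parent.operating_constraints.apply_modifications(&mods)
}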
-#[derive(Debug, Clone, PartialEq)] -pub struct Fragment { - /// The new relay-parent. - pub relay_parent: RelayChainBlockInfo, - /// The constraints this fragment is operating under. - pub operating_constraints: Constraints, - /// The core information about the prospective candidate. - pub candidate: ProspectiveCandidate, -} - -impl Fragment { - /// Produce a set of constraint modifications based on the outputs - /// of the candidate. - pub fn constraint_modifications(&self) -> ConstraintModifications { - let commitments = &self.candidate.commitments; - - ConstraintModifications { - required_parent: commitments.head_data.clone(), - hrmp_watermark: commitments.hrmp_watermark, - outbound_hrmp: { - let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - for message in &commitments.horizontal_messages { - let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); - - record.bytes_submitted += message.data.len(); - record.messages_submitted += 1; - } - - outbound_hrmp - }, - ump_messages_sent: commitments.upward_messages.len(), - ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), - dmp_messages_processed: commitments.processed_downward_messages as _, - } - } -} - /// An update to outbound HRMP channels. #[derive(Debug, Clone, PartialEq, Default)] pub struct OutboundHrmpChannelModification { @@ -396,6 +355,47 @@ pub struct ProspectiveCandidate { // modifications just from a candidate. } +/// A parachain fragment, representing another prospective parachain block. +/// +/// This has two parts: the first is the new relay-parent and its associated limitations, +/// and the second is information about the advancement of the parachain. +#[derive(Debug, Clone, PartialEq)] +pub struct Fragment { + /// The new relay-parent. + pub relay_parent: RelayChainBlockInfo, + /// The constraints this fragment is operating under. + pub operating_constraints: Constraints, + /// The core information about the prospective candidate. + pub candidate: ProspectiveCandidate, +} + +impl Fragment { + /// Produce a set of constraint modifications based on the outputs + /// of the candidate. 
+ pub fn constraint_modifications(&self) -> ConstraintModifications { + let commitments = &self.candidate.commitments; + + ConstraintModifications { + required_parent: commitments.head_data.clone(), + hrmp_watermark: commitments.hrmp_watermark, + outbound_hrmp: { + let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); + for message in &commitments.horizontal_messages { + let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); + + record.bytes_submitted += message.data.len(); + record.messages_submitted += 1; + } + + outbound_hrmp + }, + ump_messages_sent: commitments.upward_messages.len(), + ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + dmp_messages_processed: commitments.processed_downward_messages as _, + } + } +} + #[cfg(test)] mod tests { use super::*; From 6e1654275e91a732357361fb6e9ea2b457c8500c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 22:07:54 -0600 Subject: [PATCH 20/42] add some TODOs --- node/subsystem-util/src/inclusion_emulator/staging.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 0412ce750af1..49bc6078fa01 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -396,6 +396,10 @@ impl Fragment { } } +// TODO [now]: function for cumulative modifications to produce new constraints. + +// TODO [now]: function for 'rebasing'. + #[cfg(test)] mod tests { use super::*; From f47a6e7ce33e4d8fc8d48446517b493eb23b8201 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 22:23:58 -0600 Subject: [PATCH 21/42] remove stacking; handle code upgrades --- .../src/inclusion_emulator/staging.rs | 42 ++++++++----------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 49bc6078fa01..a5f0f45987b3 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -138,6 +138,8 @@ pub enum ModificationError { /// The amount of messages processed. messages_processed: usize, }, + /// No validation code upgrade to apply. + AppliedNonexistentCodeUpgrade, } impl Constraints { @@ -200,6 +202,10 @@ impl Constraints { messages_processed: modifications.dmp_messages_processed, })?; + if self.future_validation_code.is_none() && modifications.code_upgrade_applied { + return Err(ModificationError::AppliedNonexistentCodeUpgrade); + } + Ok(()) } @@ -272,6 +278,12 @@ impl Constraints { messages_processed: modifications.dmp_messages_processed, })?; + new.validation_code_hash = new + .future_validation_code + .take() + .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? + .1; + Ok(new) } } @@ -312,28 +324,8 @@ pub struct ConstraintModifications { pub ump_bytes_sent: usize, /// The amount of DMP messages processed. pub dmp_messages_processed: usize, - // TODO [now]: code upgrade application. -} - -impl ConstraintModifications { - /// Stack other modifications on top of these. - /// - /// This does no sanity-checking, so if `other` is garbage relative - /// to `self`, then the new value will be garbage as well. 
- pub fn stack(&mut self, other: &Self) { - self.required_parent = other.required_parent.clone(); - self.hrmp_watermark = other.hrmp_watermark; - - for (id, mods) in &other.outbound_hrmp { - let record = self.outbound_hrmp.entry(id.clone()).or_default(); - record.messages_submitted += mods.messages_submitted; - record.bytes_submitted += mods.bytes_submitted; - } - - self.ump_messages_sent += other.ump_messages_sent; - self.ump_bytes_sent += other.ump_bytes_sent; - self.dmp_messages_processed += other.dmp_messages_processed; - } + /// Whether a pending code upgrade has been applied. + pub code_upgrade_applied: bool, } /// The prospective candidate. @@ -351,8 +343,6 @@ pub struct ProspectiveCandidate { pub pov_hash: Hash, /// The validation code hash used by the candidate. pub validation_code_hash: ValidationCodeHash, - // TODO [now]: do code upgrades go here? if so, we can't produce - // modifications just from a candidate. } /// A parachain fragment, representing another prospective parachain block. @@ -392,6 +382,10 @@ impl Fragment { ump_messages_sent: commitments.upward_messages.len(), ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), dmp_messages_processed: commitments.processed_downward_messages as _, + code_upgrade_applied: self.operating_constraints.future_validation_code.map_or( + false, + |(at, _)| self.relay_parent.number >= at, + ), } } } From 5dfe2bc92f70a63cae293e788093c3cb4bd8a971 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 22:55:17 -0600 Subject: [PATCH 22/42] take `fragment` private --- .../src/inclusion_emulator/staging.rs | 38 +++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index a5f0f45987b3..cdc48461bbec 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -345,6 +345,14 @@ pub struct ProspectiveCandidate { pub validation_code_hash: ValidationCodeHash, } +/// Kinds of errors with the validity of a fragment. +#[derive(Debug, Clone, PartialEq)] +pub enum FragmentValidityError { + /// The validation code of + ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash), + Outputs(ModificationError) +} + /// A parachain fragment, representing another prospective parachain block. /// /// This has two parts: the first is the new relay-parent and its associated limitations, @@ -352,14 +360,38 @@ pub struct ProspectiveCandidate { #[derive(Debug, Clone, PartialEq)] pub struct Fragment { /// The new relay-parent. - pub relay_parent: RelayChainBlockInfo, + relay_parent: RelayChainBlockInfo, /// The constraints this fragment is operating under. - pub operating_constraints: Constraints, + operating_constraints: Constraints, /// The core information about the prospective candidate. - pub candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate, } impl Fragment { + /// Create a new fragment. + pub fn new( + relay_parent: RelayChainBlockInfo, + operating_constraints: Constraints, + candidate: ProspectiveCandidate, + ) -> Result { + unimplemented!() + } + + /// Access the relay parent information. + pub fn relay_parent(&self) -> &RelayChainBlockInfo { + &self.relay_parent + } + + /// Access the operating constraints + pub fn operating_constraints(&self) -> &Constraints { + &self.operating_constraints + } + + /// Access the underlying prospective candidate. 
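// Illustrative sketch of the invariant behind `AppliedNonexistentCodeUpgrade`:
// a candidate's modifications may only claim `code_upgrade_applied` when the
// constraints actually have an upgrade scheduled. `constraints` and `mods` are
// assumed inputs; this mirrors the check added to `check_modifications`.

fn upgrade_claim_consistent(constraints: &Constraints, mods: &ConstraintModifications) -> bool {
	!mods.code_upgrade_applied || constraints.future_validation_code.is_some()
}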
+ pub fn candidate(&self) -> &ProspectiveCandidate { + &self.candidate + } + /// Produce a set of constraint modifications based on the outputs /// of the candidate. pub fn constraint_modifications(&self) -> ConstraintModifications { From 305ee7d114e1f93eeccb28a5deccdee4d06b5319 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 22:56:59 -0600 Subject: [PATCH 23/42] reintroduce stacking. --- .../src/inclusion_emulator/staging.rs | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index cdc48461bbec..2eaa5f19cfd7 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -328,6 +328,28 @@ pub struct ConstraintModifications { pub code_upgrade_applied: bool, } +impl ConstraintModifications { + /// Stack other modifications on top of these. + /// + /// This does no sanity-checking, so if `other` is garbage relative + /// to `self`, then the new value will be garbage as well. + pub fn stack(&mut self, other: &Self) { + self.required_parent = other.required_parent.clone(); + self.hrmp_watermark = other.hrmp_watermark; + + for (id, mods) in &other.outbound_hrmp { + let record = self.outbound_hrmp.entry(id.clone()).or_default(); + record.messages_submitted += mods.messages_submitted; + record.bytes_submitted += mods.bytes_submitted; + } + + self.ump_messages_sent += other.ump_messages_sent; + self.ump_bytes_sent += other.ump_bytes_sent; + self.dmp_messages_processed += other.dmp_messages_processed; + self.code_upgrade_applied |= other.code_upgrade_applied; + } +} + /// The prospective candidate. #[derive(Debug, Clone, PartialEq)] pub struct ProspectiveCandidate { From ecf0287424911bc5ecbf61f7460024caa171fc9d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 8 Feb 2022 23:37:56 -0600 Subject: [PATCH 24/42] fragment constructor --- .../src/inclusion_emulator/staging.rs | 100 ++++++++++++------ 1 file changed, 69 insertions(+), 31 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 2eaa5f19cfd7..b175f7f66422 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -203,7 +203,7 @@ impl Constraints { })?; if self.future_validation_code.is_none() && modifications.code_upgrade_applied { - return Err(ModificationError::AppliedNonexistentCodeUpgrade); + return Err(ModificationError::AppliedNonexistentCodeUpgrade) } Ok(()) @@ -370,9 +370,18 @@ pub struct ProspectiveCandidate { /// Kinds of errors with the validity of a fragment. #[derive(Debug, Clone, PartialEq)] pub enum FragmentValidityError { - /// The validation code of + /// The validation code of the candidate doesn't match the + /// operating constraints. + /// + /// Expected, Got ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash), - Outputs(ModificationError) + /// The persisted-validation-data doesn't match. + /// + /// Expected, Got + PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData), + /// The outputs of the candidate are invalid under the operating + /// constraints. + OutputsInvalid(ModificationError), } /// A parachain fragment, representing another prospective parachain block. @@ -387,6 +396,9 @@ pub struct Fragment { operating_constraints: Constraints, /// The core information about the prospective candidate. 
candidate: ProspectiveCandidate, + /// Modifications to the constraints based on the outputs of + /// the candidate. + modifications: ConstraintModifications, } impl Fragment { @@ -396,7 +408,57 @@ impl Fragment { operating_constraints: Constraints, candidate: ProspectiveCandidate, ) -> Result { - unimplemented!() + let expected_pvd = PersistedValidationData { + parent_head: operating_constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: operating_constraints.max_pov_size as u32, + }; + + if expected_pvd != candidate.persisted_validation_data { + return Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + candidate.persisted_validation_data, + )) + } + + if operating_constraints.validation_code_hash != candidate.validation_code_hash { + return Err(FragmentValidityError::ValidationCodeMismatch( + operating_constraints.validation_code_hash, + candidate.validation_code_hash, + )) + } + + let modifications = { + let commitments = &candidate.commitments; + ConstraintModifications { + required_parent: commitments.head_data.clone(), + hrmp_watermark: commitments.hrmp_watermark, + outbound_hrmp: { + let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); + for message in &commitments.horizontal_messages { + let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); + + record.bytes_submitted += message.data.len(); + record.messages_submitted += 1; + } + + outbound_hrmp + }, + ump_messages_sent: commitments.upward_messages.len(), + ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + dmp_messages_processed: commitments.processed_downward_messages as _, + code_upgrade_applied: operating_constraints + .future_validation_code + .map_or(false, |(at, _)| relay_parent.number >= at), + } + }; + + operating_constraints + .check_modifications(&modifications) + .map_err(FragmentValidityError::OutputsInvalid)?; + + Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) } /// Access the relay parent information. @@ -414,33 +476,9 @@ impl Fragment { &self.candidate } - /// Produce a set of constraint modifications based on the outputs - /// of the candidate. - pub fn constraint_modifications(&self) -> ConstraintModifications { - let commitments = &self.candidate.commitments; - - ConstraintModifications { - required_parent: commitments.head_data.clone(), - hrmp_watermark: commitments.hrmp_watermark, - outbound_hrmp: { - let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - for message in &commitments.horizontal_messages { - let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); - - record.bytes_submitted += message.data.len(); - record.messages_submitted += 1; - } - - outbound_hrmp - }, - ump_messages_sent: commitments.upward_messages.len(), - ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), - dmp_messages_processed: commitments.processed_downward_messages as _, - code_upgrade_applied: self.operating_constraints.future_validation_code.map_or( - false, - |(at, _)| self.relay_parent.number >= at, - ), - } + /// Modifications to constraints based on the outputs of the candidate. 
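// Illustrative usage sketch for the constructor above: a subsystem turns an
// advertised candidate into a `Fragment`, and possession of the `Fragment` then
// serves as evidence that the candidate's inputs and outputs matched the
// operating constraints. The inputs are assumed to be already fetched.

fn into_fragment(
	relay_parent: RelayChainBlockInfo,
	constraints: Constraints,
	candidate: ProspectiveCandidate,
) -> Option<Fragment> {
	match Fragment::new(relay_parent, constraints, candidate) {
		Ok(fragment) => Some(fragment),
		Err(FragmentValidityError::PersistedValidationDataMismatch(_expected, _got)) => {
			// The collator built against stale or wrong validation data.
			None
		},
		Err(_) => None,
	}
}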
+ pub fn constraint_modifications(&self) -> &ConstraintModifications { + &self.modifications } } From 27d380ba0e1744cf6d68265a6d378c3bfbca19c8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 9 Feb 2022 15:28:24 -0600 Subject: [PATCH 25/42] add TODO --- node/subsystem-util/src/inclusion_emulator/staging.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index b175f7f66422..45f3fa603a67 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -77,6 +77,7 @@ pub struct Constraints { pub hrmp_channels_out: HashMap, /// The maximum Proof-of-Validity size allowed, in bytes. pub max_pov_size: usize, + // TODO [now]: max code size? /// The maximum number of HRMP messages allowed per candidate. pub max_hrmp_num_per_candidate: usize, /// The required parent head-data of the parachain. From a5be2ebe07a53cdded9e3a8df0eb52a25496ae68 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 16:03:58 -0600 Subject: [PATCH 26/42] allow validating fragments against future constraints --- .../src/inclusion_emulator/staging.rs | 186 ++++++++++++------ 1 file changed, 128 insertions(+), 58 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 45f3fa603a67..737481e8f51d 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -149,14 +149,16 @@ impl Constraints { &self, modifications: &ConstraintModifications, ) -> Result<(), ModificationError> { - if self - .hrmp_inbound - .valid_watermarks - .iter() - .position(|w| w == &modifications.hrmp_watermark) - .is_none() - { - return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)) + if let Some(hrmp_watermark) = modifications.hrmp_watermark { + if self + .hrmp_inbound + .valid_watermarks + .iter() + .position(|w| w == &hrmp_watermark) + .is_none() + { + return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) + } } for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { @@ -218,18 +220,23 @@ impl Constraints { ) -> Result { let mut new = self.clone(); - new.required_parent = modifications.required_parent.clone(); - match new - .hrmp_inbound - .valid_watermarks - .iter() - .position(|w| w == &modifications.hrmp_watermark) - { - Some(pos) => { - let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); - }, - None => - return Err(ModificationError::DisallowedHrmpWatermark(modifications.hrmp_watermark)), + if let Some(required_parent) = modifications.required_parent.as_ref() { + new.required_parent = required_parent.clone(); + } + + if let Some(hrmp_watermark) = modifications.hrmp_watermark { + match new + .hrmp_inbound + .valid_watermarks + .iter() + .position(|w| w == &hrmp_watermark) + { + Some(pos) => { + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); + }, + None => + return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)), + } } for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { @@ -279,11 +286,13 @@ impl Constraints { messages_processed: modifications.dmp_messages_processed, })?; - new.validation_code_hash = new - .future_validation_code - .take() - .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? 
- .1; + if modifications.code_upgrade_applied { + new.validation_code_hash = new + .future_validation_code + .take() + .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? + .1; + } Ok(new) } @@ -313,10 +322,9 @@ pub struct OutboundHrmpChannelModification { #[derive(Debug, Clone, PartialEq)] pub struct ConstraintModifications { /// The required parent head to build upon. - /// `None` indicates 'unmodified'. - pub required_parent: HeadData, + pub required_parent: Option, /// The new HRMP watermark - pub hrmp_watermark: BlockNumber, + pub hrmp_watermark: Option, /// Outbound HRMP channel modifications. pub outbound_hrmp: HashMap, /// The amount of UMP messages sent. @@ -330,13 +338,33 @@ pub struct ConstraintModifications { } impl ConstraintModifications { + /// The 'identity' modifications: these can be applied to + /// any constraints and yield the exact same result. + pub fn identity() -> Self { + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: HashMap::new(), + ump_messages_sent: 0, + ump_bytes_sent: 0, + dmp_messages_processed: 0, + code_upgrade_applied: false, + } + } + /// Stack other modifications on top of these. /// /// This does no sanity-checking, so if `other` is garbage relative /// to `self`, then the new value will be garbage as well. + /// + /// This is an addition which is not commutative. pub fn stack(&mut self, other: &Self) { - self.required_parent = other.required_parent.clone(); - self.hrmp_watermark = other.hrmp_watermark; + if let Some(ref new_parent) = other.required_parent { + self.required_parent = Some(new_parent.clone()); + } + if let Some(ref new_hrmp_watermark) = other.hrmp_watermark { + self.hrmp_watermark = Some(new_hrmp_watermark.clone()); + } for (id, mods) in &other.outbound_hrmp { let record = self.outbound_hrmp.entry(id.clone()).or_default(); @@ -404,37 +432,19 @@ pub struct Fragment { impl Fragment { /// Create a new fragment. + /// + /// This fails if the fragment isn't in line with the operating + /// constraints. 
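// Illustrative sketch: `identity` acts as the neutral element for `stack`, so
// the cumulative modifications of a whole chain of fragments can be built with
// a simple fold. `fragments` is an assumed slice ordered from oldest to newest.

fn cumulative_modifications(fragments: &[Fragment]) -> ConstraintModifications {
	let mut acc = ConstraintModifications::identity();
	for fragment in fragments {
		acc.stack(fragment.constraint_modifications());
	}
	acc
}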
pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, candidate: ProspectiveCandidate, ) -> Result { - let expected_pvd = PersistedValidationData { - parent_head: operating_constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: operating_constraints.max_pov_size as u32, - }; - - if expected_pvd != candidate.persisted_validation_data { - return Err(FragmentValidityError::PersistedValidationDataMismatch( - expected_pvd, - candidate.persisted_validation_data, - )) - } - - if operating_constraints.validation_code_hash != candidate.validation_code_hash { - return Err(FragmentValidityError::ValidationCodeMismatch( - operating_constraints.validation_code_hash, - candidate.validation_code_hash, - )) - } - let modifications = { let commitments = &candidate.commitments; ConstraintModifications { - required_parent: commitments.head_data.clone(), - hrmp_watermark: commitments.hrmp_watermark, + required_parent: Some(commitments.head_data.clone()), + hrmp_watermark: Some(commitments.hrmp_watermark), outbound_hrmp: { let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); for message in &commitments.horizontal_messages { @@ -455,9 +465,12 @@ impl Fragment { } }; - operating_constraints - .check_modifications(&modifications) - .map_err(FragmentValidityError::OutputsInvalid)?; + validate_against_constraints( + &operating_constraints, + &relay_parent, + &candidate, + &modifications, + )?; Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) } @@ -481,11 +494,68 @@ impl Fragment { pub fn constraint_modifications(&self) -> &ConstraintModifications { &self.modifications } + + /// Validate this fragment against some set of constraints + /// instead of the operating constraints. + pub fn validate_against_constraints( + &self, + constraints: &Constraints, + ) -> Result<(), FragmentValidityError> { + validate_against_constraints( + constraints, + &self.relay_parent, + &self.candidate, + &self.modifications, + ) + } } -// TODO [now]: function for cumulative modifications to produce new constraints. +fn validate_against_constraints( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + candidate: &ProspectiveCandidate, + modifications: &ConstraintModifications, +) -> Result<(), FragmentValidityError> { + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + if expected_pvd != candidate.persisted_validation_data { + return Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + candidate.persisted_validation_data.clone(), + )) + } + + if constraints.validation_code_hash != candidate.validation_code_hash { + return Err(FragmentValidityError::ValidationCodeMismatch( + constraints.validation_code_hash, + candidate.validation_code_hash, + )) + } + + constraints + .check_modifications(&modifications) + .map_err(FragmentValidityError::OutputsInvalid) +} -// TODO [now]: function for 'rebasing'. +// TODO [now]: move this to docs. +// When we get a new relay-chain block, we'll need to prune the prospective chain trees to only those that are compatible. +// This is because we need to know which prospective chains might be valid in the descendents of our new blocks. 
+// +// The operating constraints of our predictions won't change, because those are based on the +// relay-parents. Therefore fragments don't actually need to be updated. +// +// But instead what we need to do is +// 1. Prune off all the prospective chains which aren't based on the parent head-data at the tip of the relay-chain. +// More accurately, we need to prune off all the prospective chains which aren't based on the constraints at the +// tip of the relay-chain. This includes prospective chains which are offboarded i.e. the constraints don't exist. +// 2. Do we need to do anything that doesn't relate to the tail? We shouldn't need to update operating +// constraints. So as long as we can verify that one fragment directly follows another and that its parent +// was valid under the previous constraints, then everything seems fine. #[cfg(test)] mod tests { From 9c31cc00238d7bc8ec53a44bf4e5e35c82a41d6a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 16:58:30 -0600 Subject: [PATCH 27/42] docs --- .../src/inclusion_emulator/staging.rs | 80 +++++++++++++++---- 1 file changed, 66 insertions(+), 14 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 737481e8f51d..70b0fb53abfe 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -37,6 +37,70 @@ //! //! As such, [`Fragment`]s are often, but not always constructed in such a way that they are //! invalid at first and become valid later on, as the relay chain grows. +//! +//! # Usage +//! +//! It's expected that the users of this module will be building up trees of +//! [`Fragment`]s and consistently pruning and adding to the tree. +//! +//! ## Operating Constraints +//! +//! The *operating constraints* of a `Fragment` are the constraints with which that fragment +//! was intended to comply. The operating constraints are defined as the base constraints +//! of the relay-parent of the fragment modified by the cumulative modifications of all +//! fragments between the relay-parent and the current fragment. +//! +//! What the operating constraints are, in practice, is a prediction about the state of the +//! relay-chain in the future. The relay-chain is aware of some current state, and we want to +//! make an intelligent prediction about what'll be accepted in the future based on +//! prior fragments that also exist off-chain. +//! +//! ## Fragment Trees +//! +//! As the relay-chain grows, some predictions come true and others come false. +//! And new predictions get made. These three changes correspond distinctly to the +//! 3 primary operations on fragment trees. +//! +//! A fragment tree is a mental model for thinking about a forking series of predictions +//! about a single parachain. There may be one or more fragment trees per parachain. +//! +//! In expectation, most parachains will have a plausibly-unique authorship method +//! which means that they should really be much closer to fragment-chains, maybe +//! maybe with an occasional fork. +//! +//! Avoiding fragment-tree blowup is beyond the scope of this module. +//! +//! ### Pruning Fragment Trees +//! +//! When the relay-chain advances, we want to compare the new constraints +//! of that relay-parent to the roots of the fragment trees we have. There are 3 cases. +//! +//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. +//! 
This is the "prediction still uncertain" case. +//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the relay-chain. +//! in this case, we can discard the root and split & re-root the fragment tree +//! under its descendents and compare to the new constraints again. +//! This is the "prediction came true" case. +//! 3. The root fragment is invalid under the new constraints because a competing parachain block has been included +//! or it would never be accepted for some other reason. In this case we can discard the entire +//! fragment tree. +//! This is the "prediction came false" case. +//! +//! This is all a bit of a simplification because it assumes that the relay-chain advances without +//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable +//! from the perspective of a few different possible forks of the relay-chain and not pruned +//! too eagerly. +//! +//! Note that the fragments themselves don't need to change and the only thing we care about +//! is whether the predictions they represent are still valid. +//! +//! ### Extending Fragment Trees +//! +//! As predictions fade into the past, new ones should be stacked on top. +//! +//! Every new relay-chain block is an opportunity to make a new prediction about the future. +//! higher-level logic should select the leaves of the fragment-trees to build upon or whether +//! to create a new fragment-tree. use polkadot_primitives::v2::{ BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, @@ -65,6 +129,7 @@ pub struct OutboundHrmpChannelLimitations { /// parachain, which should be apparent from usage. #[derive(Debug, Clone, PartialEq)] pub struct Constraints { + // TODO [now]: Min relay-parent number? /// The amount of UMP messages remaining. pub ump_remaining: usize, /// The amount of UMP bytes remaining. @@ -542,20 +607,7 @@ fn validate_against_constraints( .map_err(FragmentValidityError::OutputsInvalid) } -// TODO [now]: move this to docs. -// When we get a new relay-chain block, we'll need to prune the prospective chain trees to only those that are compatible. -// This is because we need to know which prospective chains might be valid in the descendents of our new blocks. -// -// The operating constraints of our predictions won't change, because those are based on the -// relay-parents. Therefore fragments don't actually need to be updated. -// -// But instead what we need to do is -// 1. Prune off all the prospective chains which aren't based on the parent head-data at the tip of the relay-chain. -// More accurately, we need to prune off all the prospective chains which aren't based on the constraints at the -// tip of the relay-chain. This includes prospective chains which are offboarded i.e. the constraints don't exist. -// 2. Do we need to do anything that doesn't relate to the tail? We shouldn't need to update operating -// constraints. So as long as we can verify that one fragment directly follows another and that its parent -// was valid under the previous constraints, then everything seems fine. +// TODO [now]: fn for loading constraints from runtime. 
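// Illustrative sketch of the three pruning cases described in the docs above.
// Whether the root was subsumed by the relay chain is assumed to come from
// higher-level bookkeeping; only the validity re-check uses this module's API.

enum RootAction {
	// Prediction still uncertain: keep the tree as-is.
	Keep,
	// Prediction came true: drop the root and re-root under its children.
	ReRootToChildren,
	// Prediction came false: discard the whole tree.
	Discard,
}

fn prune_root(root: &Fragment, new_base: &Constraints, subsumed_by_relay_chain: bool) -> RootAction {
	if subsumed_by_relay_chain {
		RootAction::ReRootToChildren
	} else {
		match root.validate_against_constraints(new_base) {
			Ok(()) => RootAction::Keep,
			Err(_) => RootAction::Discard,
		}
	}
}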
#[cfg(test)] mod tests { From 9f8adc96b96d5632425be6c7c37f0a0404d43416 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 17:35:13 -0600 Subject: [PATCH 28/42] relay-parent number and min code size checks --- .../src/inclusion_emulator/staging.rs | 35 +++++++++++++++---- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 70b0fb53abfe..8cb9a62f61d8 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -129,7 +129,12 @@ pub struct OutboundHrmpChannelLimitations { /// parachain, which should be apparent from usage. #[derive(Debug, Clone, PartialEq)] pub struct Constraints { - // TODO [now]: Min relay-parent number? + /// The minimum relay-parent number accepted under these constraints. + pub min_relay_parent_number: BlockNumber, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: usize, + /// The maximum new validation code size allowed, in bytes. + pub max_code_size: usize, /// The amount of UMP messages remaining. pub ump_remaining: usize, /// The amount of UMP bytes remaining. @@ -140,9 +145,6 @@ pub struct Constraints { pub hrmp_inbound: InboundHrmpLimitations, /// The limitations of all registered outbound HRMP channels. pub hrmp_channels_out: HashMap, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: usize, - // TODO [now]: max code size? /// The maximum number of HRMP messages allowed per candidate. pub max_hrmp_num_per_candidate: usize, /// The required parent head-data of the parachain. @@ -476,6 +478,14 @@ pub enum FragmentValidityError { /// The outputs of the candidate are invalid under the operating /// constraints. OutputsInvalid(ModificationError), + /// New validation code size too big. + /// + /// Max allowed, new. + CodeSizeTooLarge(usize, usize), + /// Relay parent too old. + /// + /// Min allowed, current. + RelayParentTooOld(BlockNumber, BlockNumber), } /// A parachain fragment, representing another prospective parachain block. @@ -602,13 +612,26 @@ fn validate_against_constraints( )) } + if relay_parent.number < constraints.min_relay_parent_number { + return Err(FragmentValidityError::RelayParentTooOld( + constraints.min_relay_parent_number, + relay_parent.number, + )); + } + + let announced_code_size = candidate.commitments.new_validation_code.as_ref().map_or(0, |code| code.0.len()); + if announced_code_size > constraints.max_code_size { + return Err(FragmentValidityError::CodeSizeTooLarge( + constraints.max_code_size, + announced_code_size, + )); + } + constraints .check_modifications(&modifications) .map_err(FragmentValidityError::OutputsInvalid) } -// TODO [now]: fn for loading constraints from runtime. 
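// Worked example for the two new checks, with made-up numbers: given
// `min_relay_parent_number: 100` and `max_code_size: 3 * 1024 * 1024`, building
// on relay-parent 95 or announcing a 4 MiB code upgrade should fail before any
// of the output checks run. This mirrors the logic added to
// `validate_against_constraints` and is illustrative only.

fn preflight(
	constraints: &Constraints,
	relay_parent_number: BlockNumber,
	new_code_len: Option<usize>,
) -> Result<(), FragmentValidityError> {
	if relay_parent_number < constraints.min_relay_parent_number {
		return Err(FragmentValidityError::RelayParentTooOld(
			constraints.min_relay_parent_number,
			relay_parent_number,
		))
	}
	if let Some(len) = new_code_len {
		if len > constraints.max_code_size {
			return Err(FragmentValidityError::CodeSizeTooLarge(constraints.max_code_size, len))
		}
	}
	Ok(())
}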
- #[cfg(test)] mod tests { use super::*; From 6fe064ced6d52cc956fdf933f48085a5b7fbbae7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 17:38:31 -0600 Subject: [PATCH 29/42] check code upgrade restriction --- node/subsystem-util/src/inclusion_emulator/staging.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 8cb9a62f61d8..42864361c01c 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -154,7 +154,7 @@ pub struct Constraints { /// The go-ahead signal as-of this parachain. pub go_ahead: UpgradeGoAhead, /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: UpgradeRestriction, + pub upgrade_restriction: Option, /// The future validation code hash, if any, and at what relay-parent /// number the upgrade would be minimally applied. pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, @@ -486,6 +486,8 @@ pub enum FragmentValidityError { /// /// Min allowed, current. RelayParentTooOld(BlockNumber, BlockNumber), + /// Code upgrade not allowed. + CodeUpgradeRestricted, } /// A parachain fragment, representing another prospective parachain block. @@ -619,6 +621,13 @@ fn validate_against_constraints( )); } + if candidate.commitments.new_validation_code.is_some() { + match constraints.upgrade_restriction { + None => {} + Some(UpgradeRestriction::Present) => return Err(FragmentValidityError::CodeUpgradeRestricted), + } + } + let announced_code_size = candidate.commitments.new_validation_code.as_ref().map_or(0, |code| code.0.len()); if announced_code_size > constraints.max_code_size { return Err(FragmentValidityError::CodeSizeTooLarge( From 7367e654ace6a1cf5235622fa91a067fcae8c857 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 17:44:08 -0600 Subject: [PATCH 30/42] check max hrmp per candidate --- .../src/inclusion_emulator/staging.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 42864361c01c..e915956675e2 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -486,6 +486,13 @@ pub enum FragmentValidityError { /// /// Min allowed, current. RelayParentTooOld(BlockNumber, BlockNumber), + /// Too many messages submitted to all HRMP channels. + HrmpMessagesPerCandidateOverflow { + /// The amount of messages a single candidate can submit. + messages_allowed: usize, + /// The amount of messages sent to all HRMP channels. + messages_submitted: usize, + }, /// Code upgrade not allowed. 
CodeUpgradeRestricted, } @@ -636,6 +643,13 @@ fn validate_against_constraints( )); } + if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_hrmp_num_per_candidate, + messages_submitted: candidate.commitments.horizontal_messages.len(), + }); + } + constraints .check_modifications(&modifications) .map_err(FragmentValidityError::OutputsInvalid) From 9dd5cb27ec31761c1e714a29806a7fa8810e1aca Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 17:44:28 -0600 Subject: [PATCH 31/42] fmt --- .../src/inclusion_emulator/staging.rs | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index e915956675e2..627f030f102c 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -292,17 +292,11 @@ impl Constraints { } if let Some(hrmp_watermark) = modifications.hrmp_watermark { - match new - .hrmp_inbound - .valid_watermarks - .iter() - .position(|w| w == &hrmp_watermark) - { + match new.hrmp_inbound.valid_watermarks.iter().position(|w| w == &hrmp_watermark) { Some(pos) => { let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); }, - None => - return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)), + None => return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)), } } @@ -625,29 +619,34 @@ fn validate_against_constraints( return Err(FragmentValidityError::RelayParentTooOld( constraints.min_relay_parent_number, relay_parent.number, - )); + )) } if candidate.commitments.new_validation_code.is_some() { match constraints.upgrade_restriction { - None => {} - Some(UpgradeRestriction::Present) => return Err(FragmentValidityError::CodeUpgradeRestricted), + None => {}, + Some(UpgradeRestriction::Present) => + return Err(FragmentValidityError::CodeUpgradeRestricted), } } - let announced_code_size = candidate.commitments.new_validation_code.as_ref().map_or(0, |code| code.0.len()); + let announced_code_size = candidate + .commitments + .new_validation_code + .as_ref() + .map_or(0, |code| code.0.len()); if announced_code_size > constraints.max_code_size { return Err(FragmentValidityError::CodeSizeTooLarge( constraints.max_code_size, announced_code_size, - )); + )) } if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_hrmp_num_per_candidate, messages_submitted: candidate.commitments.horizontal_messages.len(), - }); + }) } constraints From a7d4347c66efbdac607e67bc01f2f996584ffcf2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 17:47:10 -0600 Subject: [PATCH 32/42] remove GoAhead logic because it wasn't helpful --- node/subsystem-util/src/inclusion_emulator/staging.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 627f030f102c..2feb81c07b9d 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -104,7 +104,7 @@ use polkadot_primitives::v2::{ BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as 
ParaId, - PersistedValidationData, UpgradeGoAhead, UpgradeRestriction, ValidationCodeHash, + PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; use std::collections::HashMap; @@ -151,8 +151,6 @@ pub struct Constraints { pub required_parent: HeadData, /// The expected validation-code-hash of this parachain. pub validation_code_hash: ValidationCodeHash, - /// The go-ahead signal as-of this parachain. - pub go_ahead: UpgradeGoAhead, /// The code upgrade restriction signal as-of this parachain. pub upgrade_restriction: Option, /// The future validation code hash, if any, and at what relay-parent @@ -635,6 +633,7 @@ fn validate_against_constraints( .new_validation_code .as_ref() .map_or(0, |code| code.0.len()); + if announced_code_size > constraints.max_code_size { return Err(FragmentValidityError::CodeSizeTooLarge( constraints.max_code_size, From 87fd692d97f4c1d4956232409b3ac8a10416d6d7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 17:52:18 -0600 Subject: [PATCH 33/42] docs on code upgrade failure --- node/subsystem-util/src/inclusion_emulator/staging.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 2feb81c07b9d..8104e6a11d3b 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -101,6 +101,17 @@ //! Every new relay-chain block is an opportunity to make a new prediction about the future. //! higher-level logic should select the leaves of the fragment-trees to build upon or whether //! to create a new fragment-tree. +//! +//! ### Code Upgrades +//! +//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling +//! logic is very path-dependent and intricate so we just assume that code upgrades +//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, +//! in practice and code upgrades are fairly rare. So what's likely to happen around code +//! upgrades is that the entire fragment-tree has to get discarded at some point. +//! +//! That means a few blocks of execution time lost, which is not a big deal for code upgrades +//! in practice at most once every few weeks. use polkadot_primitives::v2::{ BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, From 85170dd2b529ce8254529641061f0eb0f43068b1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 18:33:03 -0600 Subject: [PATCH 34/42] test stacking --- .../src/inclusion_emulator/staging.rs | 111 +++++++++++++++++- 1 file changed, 107 insertions(+), 4 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 8104e6a11d3b..0d34ff19e0af 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -450,6 +450,12 @@ impl ConstraintModifications { } /// The prospective candidate. +/// +/// This comprises the key information that represent a candidate +/// without pinning it to a particular session. For example, everything +/// to do with the collator's signature and commitments are represented +/// here. But the erasure-root is not. This means that prospective candidates +/// are unlinked from all sessions. 
#[derive(Debug, Clone, PartialEq)] pub struct ProspectiveCandidate { /// The commitments to the output of the execution. @@ -502,8 +508,8 @@ pub enum FragmentValidityError { /// A parachain fragment, representing another prospective parachain block. /// -/// This has two parts: the first is the new relay-parent and its associated limitations, -/// and the second is information about the advancement of the parachain. +/// This is a type which guarantees that the candidate is valid under the +/// operating constraints. #[derive(Debug, Clone, PartialEq)] pub struct Fragment { /// The new relay-parent. @@ -521,7 +527,8 @@ impl Fragment { /// Create a new fragment. /// /// This fails if the fragment isn't in line with the operating - /// constraints. + /// constraints. That is, either its inputs or its outputs fail + /// checks against the constraints. pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, @@ -668,7 +675,103 @@ fn validate_against_constraints( mod tests { use super::*; - // TODO [now] Stacking modifications + #[test] + fn stack_modifications() { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + let a = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert(para_a, OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }); + + map.insert(para_b, OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let b = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert(para_b, OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }); + + map.insert(para_c, OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let mut c = a.clone(); + c.stack(&b); + + assert_eq!( + c, + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert(para_a, OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }); + + map.insert(para_b, OutboundHrmpChannelModification { + bytes_submitted: 200, + messages_submitted: 10, + }); + + map.insert(para_c, OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }); + + map + }, + ump_messages_sent: 12, + ump_bytes_sent: 2000, + dmp_messages_processed: 10, + code_upgrade_applied: true, + }, + ); + + let mut d = ConstraintModifications::identity(); + d.stack(&a); + d.stack(&b); + + assert_eq!(c, d); + } // TODO [now] checking outputs against constraints. + + // TODO [now] checking fragments against constraints. + + // TODO [now] checking modifications from fragments are + // produced correctly. 
} From a283a14e2080c969e9c8898bf78aaedf719add70 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 18:53:41 -0600 Subject: [PATCH 35/42] test modifications against constraints --- .../src/inclusion_emulator/staging.rs | 237 +++++++++++++++++- 1 file changed, 236 insertions(+), 1 deletion(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 0d34ff19e0af..69f0d3b41120 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -113,7 +113,7 @@ //! That means a few blocks of execution time lost, which is not a big deal for code upgrades //! in practice at most once every few weeks. -use polkadot_primitives::v2::{ +use polkadot_primitives::vstaging::{ BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; @@ -674,6 +674,7 @@ fn validate_against_constraints( #[cfg(test)] mod tests { use super::*; + use polkadot_primitives::vstaging::ValidationCode; #[test] fn stack_modifications() { @@ -768,6 +769,240 @@ mod tests { assert_eq!(c, d); } + fn make_constraints() -> Constraints { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + Constraints { + min_relay_parent_number: 5, + max_pov_size: 1000, + max_code_size: 1000, + ump_remaining: 10, + ump_remaining_bytes: 1024, + dmp_remaining_messages: 5, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: vec![6, 8], + }, + hrmp_channels_out: { + let mut map = HashMap::new(); + + map.insert(para_a, OutboundHrmpChannelLimitations { + messages_remaining: 5, + bytes_remaining: 512, + }); + + map.insert(para_b, OutboundHrmpChannelLimitations { + messages_remaining: 10, + bytes_remaining: 1024, + }); + + map.insert(para_c, OutboundHrmpChannelLimitations { + messages_remaining: 1, + bytes_remaining: 128, + }); + + map + }, + max_hrmp_num_per_candidate: 5, + required_parent: HeadData::from(vec![1, 2, 3]), + validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + #[test] + fn constraints_disallowed_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(7); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + } + + #[test] + fn constraints_no_such_hrmp_channel() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let bad_para = ParaId::from(100u32); + modifications.outbound_hrmp.insert( + bad_para, + OutboundHrmpChannelModification { + bytes_submitted: 0, + messages_submitted: 0, + }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + } + + #[test] + fn constraints_hrmp_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + 
OutboundHrmpChannelModification { + bytes_submitted: 0, + messages_submitted: 6, + }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + } + + #[test] + fn constraints_hrmp_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { + bytes_submitted: 513, + messages_submitted: 1, + }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + } + + #[test] + fn constraints_ump_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_messages_sent = 11; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + } + + #[test] + fn constraints_ump_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_bytes_sent = 1025; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + } + + #[test] + fn constraints_dmp_messages() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.dmp_messages_processed = 6; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 5, + messages_processed: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 5, + messages_processed: 6, + }), + ); + } + + #[test] + fn constraints_nonexistent_code_upgrade() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.code_upgrade_applied = true; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + } + // TODO [now] checking outputs against constraints. // TODO [now] checking fragments against constraints. 
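The tests above only exercise the failure paths of `check_modifications` and `apply_modifications`. For orientation, a minimal sketch of the corresponding success path, reusing the `make_constraints` helper from the same test module; the one assumption beyond the hunks above is that `apply_modifications` returns the narrowed `Constraints` on success (the diffs only show its error arms).

    let constraints = make_constraints();

    // Figures chosen to stay comfortably inside the limits of `make_constraints`:
    // 10 UMP messages / 1024 UMP bytes remaining, 5 DMP messages remaining.
    let mut modifications = ConstraintModifications::identity();
    modifications.ump_messages_sent = 2;
    modifications.ump_bytes_sent = 100;
    modifications.dmp_messages_processed = 1;

    // Checking validates without changing anything; applying yields the narrowed
    // constraints under which the next fragment in the chain would be validated.
    assert!(constraints.check_modifications(&modifications).is_ok());
    let _narrowed = constraints
        .apply_modifications(&modifications)
        .expect("modifications are within limits");

    // Stacking composes the modifications of consecutive fragments; two copies of
    // this update still fit within the base constraints.
    let mut stacked = modifications.clone();
    stacked.stack(&modifications);
    assert!(constraints.check_modifications(&stacked).is_ok());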
From 4ea280b42e0bcbf220bc351e142dcf722773ec69 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 10 Feb 2022 18:53:58 -0600 Subject: [PATCH 36/42] fmt --- .../src/inclusion_emulator/staging.rs | 115 +++++++++--------- 1 file changed, 58 insertions(+), 57 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 69f0d3b41120..8cb3a02170b6 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -687,15 +687,15 @@ mod tests { hrmp_watermark: None, outbound_hrmp: { let mut map = HashMap::new(); - map.insert(para_a, OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }); + map.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); - map.insert(para_b, OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }); + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); map }, @@ -710,15 +710,15 @@ mod tests { hrmp_watermark: None, outbound_hrmp: { let mut map = HashMap::new(); - map.insert(para_b, OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }); + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); - map.insert(para_c, OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }); + map.insert( + para_c, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); map }, @@ -738,20 +738,29 @@ mod tests { hrmp_watermark: None, outbound_hrmp: { let mut map = HashMap::new(); - map.insert(para_a, OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }); - - map.insert(para_b, OutboundHrmpChannelModification { - bytes_submitted: 200, - messages_submitted: 10, - }); - - map.insert(para_c, OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }); + map.insert( + para_a, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { + bytes_submitted: 200, + messages_submitted: 10, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); map }, @@ -781,26 +790,27 @@ mod tests { ump_remaining: 10, ump_remaining_bytes: 1024, dmp_remaining_messages: 5, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: vec![6, 8], - }, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, hrmp_channels_out: { let mut map = HashMap::new(); - map.insert(para_a, OutboundHrmpChannelLimitations { - messages_remaining: 5, - bytes_remaining: 512, - }); + map.insert( + para_a, + OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, + ); - map.insert(para_b, OutboundHrmpChannelLimitations { - messages_remaining: 10, - bytes_remaining: 1024, - }); + map.insert( + para_b, + OutboundHrmpChannelLimitations { + messages_remaining: 10, + bytes_remaining: 1024, + }, + ); - map.insert(para_c, OutboundHrmpChannelLimitations { - messages_remaining: 1, - bytes_remaining: 128, - }); + map.insert( + para_c, + OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, + ); map }, @@ -836,10 +846,7 @@ mod tests { let bad_para = ParaId::from(100u32); 
modifications.outbound_hrmp.insert( bad_para, - OutboundHrmpChannelModification { - bytes_submitted: 0, - messages_submitted: 0, - }, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, ); assert_eq!( @@ -860,10 +867,7 @@ mod tests { let para_a = ParaId::from(1u32); modifications.outbound_hrmp.insert( para_a, - OutboundHrmpChannelModification { - bytes_submitted: 0, - messages_submitted: 6, - }, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, ); assert_eq!( @@ -892,10 +896,7 @@ mod tests { let para_a = ParaId::from(1u32); modifications.outbound_hrmp.insert( para_a, - OutboundHrmpChannelModification { - bytes_submitted: 513, - messages_submitted: 1, - }, + OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, ); assert_eq!( From 22251e4731b2ad5f37a3806fef03f38a1bf302a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 16:52:36 -0600 Subject: [PATCH 37/42] test fragments --- node/subsystem-util/Cargo.toml | 1 - .../src/inclusion_emulator/staging.rs | 191 +++++++++++++++++- 2 files changed, 190 insertions(+), 2 deletions(-) diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index cf22f7916132..2d1688ee0dcf 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -38,4 +38,3 @@ log = "0.4.13" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } lazy_static = "1.4.0" polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } - diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 8cb3a02170b6..f795daa34279 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -529,6 +529,11 @@ impl Fragment { /// This fails if the fragment isn't in line with the operating /// constraints. That is, either its inputs or its outputs fail /// checks against the constraints. + /// + /// This doesn't check that the collator signature is valid or + /// whether the PoV is large enough or whether the hrmp messages + /// are in ascending order and non-duplicate + // TODO [now]: maybe it should. 
pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, @@ -674,7 +679,8 @@ fn validate_against_constraints( #[cfg(test)] mod tests { use super::*; - use polkadot_primitives::vstaging::ValidationCode; + use polkadot_primitives::vstaging::{CollatorPair, ValidationCode, OutboundHrmpMessage}; + use sp_application_crypto::Pair; #[test] fn stack_modifications() { @@ -1004,6 +1010,189 @@ mod tests { ); } + fn make_candidate( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + ) -> ProspectiveCandidate { + let collator_pair = CollatorPair::generate().0; + let collator = collator_pair.public(); + + let sig = collator_pair.sign(b"blabla".as_slice()); + + ProspectiveCandidate { + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: HeadData::from(vec![1, 2, 3, 4, 5]), + processed_downward_messages: 0, + hrmp_watermark: relay_parent.number, + }, + collator, + collator_signature: sig, + persisted_validation_data: PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }, + pov_hash: Hash::repeat_byte(1), + validation_code_hash: constraints.validation_code_hash, + } + } + + #[test] + fn fragment_validation_code_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let expected_code = constraints.validation_code_hash.clone(); + let got_code = ValidationCode(vec![9, 9 , 9]).hash(); + + candidate.validation_code_hash = got_code; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::ValidationCodeMismatch( + expected_code, + got_code, + )), + ) + } + + #[test] + fn fragment_pvd_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let relay_parent_b = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0b), + storage_root: Hash::repeat_byte(0xee), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent_b.number, + relay_parent_storage_root: relay_parent_b.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + let got_pvd = candidate.persisted_validation_data.clone(); + + assert_eq!( + Fragment::new(relay_parent_b, constraints, candidate), + Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + got_pvd, + )), + ); + } + + #[test] + fn fragment_code_size_too_large() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_code_size = constraints.max_code_size; + candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into()); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeSizeTooLarge( + max_code_size, + max_code_size + 1, + )), + ); + } + + #[test] + 
fn fragment_relay_parent_too_old() { + let relay_parent = RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::RelayParentTooOld( + 5, + 3, + )), + ); + } + + #[test] + fn fragment_hrmp_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_hrmp = constraints.max_hrmp_num_per_candidate; + + candidate.commitments.horizontal_messages.extend( + (0..max_hrmp + 1).map(|i| OutboundHrmpMessage { + recipient: ParaId::from(i as u32), + data: vec![1, 2, 3], + }) + ); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: max_hrmp, + messages_submitted: max_hrmp + 1, + }), + ); + } + + #[test] + fn fragment_code_upgrade_restricted() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + constraints.upgrade_restriction = Some(UpgradeRestriction::Present); + candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeUpgradeRestricted), + ); + } + // TODO [now] checking outputs against constraints. // TODO [now] checking fragments against constraints. From e974afa2b27725d199d665d2a77b5a66d5c418e7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 16:59:35 -0600 Subject: [PATCH 38/42] descending or duplicate test --- .../src/inclusion_emulator/staging.rs | 65 ++++++++++++++++--- 1 file changed, 57 insertions(+), 8 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index f795daa34279..5546775f193f 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -504,6 +504,11 @@ pub enum FragmentValidityError { }, /// Code upgrade not allowed. CodeUpgradeRestricted, + /// HRMP messages are not ascending or are duplicate. + /// + /// The `usize` is the index into the outbound HRMP messages of + /// the candidate. + HrmpMessagesDescendingOrDuplicate(usize), } /// A parachain fragment, representing another prospective parachain block. @@ -531,9 +536,7 @@ impl Fragment { /// checks against the constraints. /// /// This doesn't check that the collator signature is valid or - /// whether the PoV is large enough or whether the hrmp messages - /// are in ascending order and non-duplicate - // TODO [now]: maybe it should. + /// whether the PoV is small enough. 
pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, @@ -546,7 +549,16 @@ impl Fragment { hrmp_watermark: Some(commitments.hrmp_watermark), outbound_hrmp: { let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - for message in &commitments.horizontal_messages { + + let mut last_recipient = None::; + for (i, message) in commitments.horizontal_messages.iter().enumerate() { + if let Some(last) = last_recipient { + if last >= message.recipient { + return Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i)); + } + } + + last_recipient = Some(message.recipient); let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); record.bytes_submitted += message.data.len(); @@ -1193,10 +1205,47 @@ mod tests { ); } - // TODO [now] checking outputs against constraints. + #[test] + fn fragment_hrmp_messages_descending_or_duplicate() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; - // TODO [now] checking fragments against constraints. + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); - // TODO [now] checking modifications from fragments are - // produced correctly. + candidate.commitments.horizontal_messages = vec![ + OutboundHrmpMessage { + recipient: ParaId::from(0 as u32), + data: vec![1, 2, 3], + }, + OutboundHrmpMessage { + recipient: ParaId::from(0 as u32), + data: vec![4, 5, 6], + } + ]; + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + + candidate.commitments.horizontal_messages = vec![ + OutboundHrmpMessage { + recipient: ParaId::from(1 as u32), + data: vec![1, 2, 3], + }, + OutboundHrmpMessage { + recipient: ParaId::from(0 as u32), + data: vec![4, 5, 6], + } + ]; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + } } From d6d6a6598250061dfae001f9f0f4f6145244170c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 16:59:43 -0600 Subject: [PATCH 39/42] fmt --- .../src/inclusion_emulator/staging.rs | 57 ++++++------------- 1 file changed, 16 insertions(+), 41 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 5546775f193f..784d276174fc 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -554,7 +554,9 @@ impl Fragment { for (i, message) in commitments.horizontal_messages.iter().enumerate() { if let Some(last) = last_recipient { if last >= message.recipient { - return Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i)); + return Err( + FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i), + ) } } @@ -691,7 +693,7 @@ fn validate_against_constraints( #[cfg(test)] mod tests { use super::*; - use polkadot_primitives::vstaging::{CollatorPair, ValidationCode, OutboundHrmpMessage}; + use polkadot_primitives::vstaging::{CollatorPair, OutboundHrmpMessage, ValidationCode}; use sp_application_crypto::Pair; #[test] @@ -1065,16 +1067,13 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); let expected_code = constraints.validation_code_hash.clone(); - let got_code = ValidationCode(vec![9, 9 , 9]).hash(); + let got_code = ValidationCode(vec![9, 9, 
9]).hash(); candidate.validation_code_hash = got_code; assert_eq!( Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::ValidationCodeMismatch( - expected_code, - got_code, - )), + Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), ) } @@ -1106,10 +1105,7 @@ mod tests { assert_eq!( Fragment::new(relay_parent_b, constraints, candidate), - Err(FragmentValidityError::PersistedValidationDataMismatch( - expected_pvd, - got_pvd, - )), + Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), ); } @@ -1129,10 +1125,7 @@ mod tests { assert_eq!( Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeSizeTooLarge( - max_code_size, - max_code_size + 1, - )), + Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), ); } @@ -1149,10 +1142,7 @@ mod tests { assert_eq!( Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::RelayParentTooOld( - 5, - 3, - )), + Err(FragmentValidityError::RelayParentTooOld(5, 3,)), ); } @@ -1169,12 +1159,9 @@ mod tests { let max_hrmp = constraints.max_hrmp_num_per_candidate; - candidate.commitments.horizontal_messages.extend( - (0..max_hrmp + 1).map(|i| OutboundHrmpMessage { - recipient: ParaId::from(i as u32), - data: vec![1, 2, 3], - }) - ); + candidate.commitments.horizontal_messages.extend((0..max_hrmp + 1).map(|i| { + OutboundHrmpMessage { recipient: ParaId::from(i as u32), data: vec![1, 2, 3] } + })); assert_eq!( Fragment::new(relay_parent, constraints, candidate), @@ -1217,14 +1204,8 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); candidate.commitments.horizontal_messages = vec![ - OutboundHrmpMessage { - recipient: ParaId::from(0 as u32), - data: vec![1, 2, 3], - }, - OutboundHrmpMessage { - recipient: ParaId::from(0 as u32), - data: vec![4, 5, 6], - } + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]; assert_eq!( @@ -1233,14 +1214,8 @@ mod tests { ); candidate.commitments.horizontal_messages = vec![ - OutboundHrmpMessage { - recipient: ParaId::from(1 as u32), - data: vec![1, 2, 3], - }, - OutboundHrmpMessage { - recipient: ParaId::from(0 as u32), - data: vec![4, 5, 6], - } + OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]; assert_eq!( From 48539609e9e16bea90d2a068e14cbdc2b91fd896 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 17:00:22 -0600 Subject: [PATCH 40/42] remove unused imports in vstaging --- primitives/src/v1/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index 9e06a82fa4c8..b6b9e00d58eb 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -18,12 +18,9 @@ use bitvec::vec::BitVec; use parity_scale_codec::{Decode, Encode}; -use scale_info::TypeInfo; -use sp_std::prelude::*; use application_crypto::KeyTypeId; use inherents::InherentIdentifier; -use primitives::RuntimeDebug; use runtime_primitives::traits::{AppVerify, Header as HeaderT}; use sp_arithmetic::traits::{BaseArithmetic, Saturating}; From 36906facceb455c61b159ed60515ec0f7c2f2dfd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 17:12:36 -0600 Subject: [PATCH 41/42] wrong primitives --- primitives/src/v1/mod.rs | 3 +++ primitives/src/vstaging/mod.rs 
| 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index b6b9e00d58eb..9e06a82fa4c8 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -18,9 +18,12 @@ use bitvec::vec::BitVec; use parity_scale_codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_std::prelude::*; use application_crypto::KeyTypeId; use inherents::InherentIdentifier; +use primitives::RuntimeDebug; use runtime_primitives::traits::{AppVerify, Header as HeaderT}; use sp_arithmetic::traits::{BaseArithmetic, Saturating}; diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index 8e17b2c00f9b..b26f78ff502e 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -17,9 +17,6 @@ //! Staging Primitives. use parity_scale_codec::{Decode, Encode}; -use primitives::RuntimeDebug; -use scale_info::TypeInfo; -use sp_std::prelude::*; pub use crate::v2::*; From 3b403ed53e19116f9d850be1f81e82423f6baea5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 18:45:54 -0600 Subject: [PATCH 42/42] spellcheck --- node/subsystem-util/src/inclusion_emulator/staging.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 784d276174fc..e886a9a0ff22 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -13,7 +13,7 @@ //! The implementation of the inclusion emulator for the 'staging' runtime version. //! -//! This is currently v1 (v2?), but will evolve to v3. +//! This is currently `v1` (`v2`?), but will evolve to `v3`. // TODO https://github.com/paritytech/polkadot/issues/4803 //! //! A set of utilities for node-side code to emulate the logic the runtime uses for checking @@ -52,7 +52,7 @@ //! //! What the operating constraints are, in practice, is a prediction about the state of the //! relay-chain in the future. The relay-chain is aware of some current state, and we want to -//! make an intelligent prediction about what'll be accepted in the future based on +//! make an intelligent prediction about what might be accepted in the future based on //! prior fragments that also exist off-chain. //! //! ## Fragment Trees @@ -455,7 +455,7 @@ impl ConstraintModifications { /// without pinning it to a particular session. For example, everything /// to do with the collator's signature and commitments are represented /// here. But the erasure-root is not. This means that prospective candidates -/// are unlinked from all sessions. +/// are not correlated to any session in particular. #[derive(Debug, Clone, PartialEq)] pub struct ProspectiveCandidate { /// The commitments to the output of the execution.
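Taken together, the constraints, modifications and fragments above compose into the node-side emulation described in the module docs. A minimal success-path sketch for `Fragment::new`, reusing the `make_constraints` and `make_candidate` test helpers; the assumption is only that a candidate built directly from the constraints (matching required parent, validation code hash and persisted validation data, committing to no messages or code upgrade) passes every check in `validate_against_constraints`.

    let relay_parent = RelayChainBlockInfo {
        number: 6,
        hash: Hash::repeat_byte(0x0a),
        storage_root: Hash::repeat_byte(0xff),
    };

    let constraints = make_constraints();
    // `make_candidate` copies the required parent, validation code hash and
    // persisted validation data straight from the constraints, commits to no
    // messages and no code upgrade, and uses the relay-parent number (6) as its
    // HRMP watermark, which is one of the valid watermarks [6, 8].
    let candidate = make_candidate(&constraints, &relay_parent);

    // The returned fragment is guaranteed to satisfy the operating constraints
    // and can be layered into a fragment-tree or re-validated after rebasing
    // onto a newer relay-parent.
    let _fragment = Fragment::new(relay_parent, constraints, candidate)
        .expect("candidate respects the operating constraints");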