diff --git a/Cargo.lock b/Cargo.lock index 19abd6ad5..ab1d18e3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17506,6 +17506,7 @@ dependencies = [ "dancelight-runtime-constants", "dp-container-chain-genesis-data", "env_logger 0.11.3", + "flume 0.10.14", "frame-benchmarking", "frame-benchmarking-cli", "frame-support", @@ -17599,6 +17600,7 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-consensus", + "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-core", diff --git a/client/service-container-chain/src/chain_spec.rs b/client/service-container-chain/src/chain_spec.rs index d6d1c3148..0497992bb 100644 --- a/client/service-container-chain/src/chain_spec.rs +++ b/client/service-container-chain/src/chain_spec.rs @@ -17,49 +17,11 @@ use { sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}, serde::{Deserialize, Serialize}, - std::collections::BTreeMap, }; /// Specialized `ChainSpec` for container chains that only allows raw genesis format. pub type RawChainSpec = sc_service::GenericChainSpec; -/// Helper type that implements the traits needed to be used as a "GenesisConfig", -/// but whose implementation panics because we only expect it to be used with raw ChainSpecs, -/// so it will never be serialized or deserialized. -/// This is because container chains must use raw chain spec files where the "genesis" -/// field only has one field: "raw". 
-pub struct RawGenesisConfig { - pub storage_raw: BTreeMap, Vec>, -} - -impl Serialize for RawGenesisConfig { - fn serialize(&self, _serializer: S) -> Result - where - S: serde::Serializer, - { - panic!("RawGenesisConfigDummy should never be serialized") - } -} - -impl<'de> Deserialize<'de> for RawGenesisConfig { - fn deserialize(_deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - panic!("Attempted to read a non-raw ContainerChain ChainSpec.\nHelp: add `--raw` flag to `build-spec` command to generate a raw chain spec") - } -} - -impl sp_runtime::BuildStorage for RawGenesisConfig { - fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { - storage - .top - .extend(self.storage_raw.iter().map(|(k, v)| (k.clone(), v.clone()))); - - Ok(()) - } -} - /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] #[serde(deny_unknown_fields)] diff --git a/client/service-container-chain/src/cli.rs b/client/service-container-chain/src/cli.rs index 8aee43a15..91ba36914 100644 --- a/client/service-container-chain/src/cli.rs +++ b/client/service-container-chain/src/cli.rs @@ -15,7 +15,6 @@ // along with Tanssi. 
If not, see use { - crate::chain_spec::RawGenesisConfig, cumulus_client_cli::{CollatorOptions, RelayChainMode}, dc_orchestrator_chain_interface::ContainerChainGenesisData, dp_container_chain_genesis_data::json::properties_to_map, @@ -181,9 +180,6 @@ impl ContainerChainCli { relay_chain, para_id, }; - let raw_genesis_config = RawGenesisConfig { - storage_raw: storage_raw.clone(), - }; let chain_spec = crate::chain_spec::RawChainSpec::builder( // This code is not used, we override it in `set_storage` below @@ -210,7 +206,7 @@ impl ContainerChainCli { let mut chain_spec = chain_spec.build(); chain_spec.set_storage(Storage { - top: raw_genesis_config.storage_raw, + top: storage_raw, children_default: Default::default(), }); diff --git a/pallets/external-validators/src/benchmarking.rs b/pallets/external-validators/src/benchmarking.rs index e9c709635..94426ae9d 100644 --- a/pallets/external-validators/src/benchmarking.rs +++ b/pallets/external-validators/src/benchmarking.rs @@ -25,7 +25,8 @@ use { frame_support::traits::{Currency, EnsureOrigin, Get}, frame_system::{EventRecord, RawOrigin}, pallet_session::{self as session, SessionManager}, - sp_runtime::traits::Convert, + rand::{RngCore, SeedableRng}, + sp_runtime::{codec, traits::Convert}, sp_std::prelude::*, }; const SEED: u32 = 0; @@ -52,21 +53,22 @@ fn create_funded_user( user } -fn keys(c: u32) -> ::Keys { - use rand::{RngCore, SeedableRng}; - - let keys = { - let mut keys = [0u8; 256]; +struct InputFromRng<'a, T>(&'a mut T); +impl<'a, T: RngCore> codec::Input for InputFromRng<'a, T> { + fn remaining_len(&mut self) -> Result, codec::Error> { + Ok(None) + } - if c > 0 { - let mut rng = rand::rngs::StdRng::seed_from_u64(u64::from(c)); - rng.fill_bytes(&mut keys); - } + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + self.0.fill_bytes(into); + Ok(()) + } +} - keys - }; +fn keys(c: u32) -> ::Keys { + let mut rng = rand::rngs::StdRng::seed_from_u64(u64::from(c)); - Decode::decode(&mut 
&keys[..]).unwrap() + Decode::decode(&mut InputFromRng(&mut rng)).unwrap() } fn invulnerable( diff --git a/pallets/invulnerables/src/benchmarking.rs b/pallets/invulnerables/src/benchmarking.rs index 3aa77b363..9b3f10980 100644 --- a/pallets/invulnerables/src/benchmarking.rs +++ b/pallets/invulnerables/src/benchmarking.rs @@ -28,7 +28,8 @@ use { }, frame_system::{EventRecord, RawOrigin}, pallet_session::{self as session, SessionManager}, - sp_runtime::traits::AtLeast32BitUnsigned, + rand::{RngCore, SeedableRng}, + sp_runtime::{codec, traits::AtLeast32BitUnsigned}, sp_std::prelude::*, tp_traits::DistributeRewards, }; @@ -56,21 +57,22 @@ fn create_funded_user( user } -fn keys(c: u32) -> ::Keys { - use rand::{RngCore, SeedableRng}; - - let keys = { - let mut keys = [0u8; 128]; +struct InputFromRng<'a, T>(&'a mut T); +impl<'a, T: RngCore> codec::Input for InputFromRng<'a, T> { + fn remaining_len(&mut self) -> Result, codec::Error> { + Ok(None) + } - if c > 0 { - let mut rng = rand::rngs::StdRng::seed_from_u64(u64::from(c)); - rng.fill_bytes(&mut keys); - } + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + self.0.fill_bytes(into); + Ok(()) + } +} - keys - }; +fn keys(c: u32) -> ::Keys { + let mut rng = rand::rngs::StdRng::seed_from_u64(u64::from(c)); - Decode::decode(&mut &keys[..]).unwrap() + Decode::decode(&mut InputFromRng(&mut rng)).unwrap() } fn invulnerable( diff --git a/solo-chains/node/tanssi-relay-service/Cargo.toml b/solo-chains/node/tanssi-relay-service/Cargo.toml index f088ac744..49dbf03dd 100644 --- a/solo-chains/node/tanssi-relay-service/Cargo.toml +++ b/solo-chains/node/tanssi-relay-service/Cargo.toml @@ -47,6 +47,7 @@ sp-api = { workspace = true } sp-authority-discovery = { workspace = true } sp-block-builder = { workspace = true } sp-blockchain = { workspace = true } +sp-consensus-aura = { workspace = true } sp-consensus-babe = { workspace = true } sp-core = { workspace = true, features = [ "std" ] } sp-inherents = { workspace = true, 
features = [ "std" ] } @@ -81,6 +82,7 @@ async-io = { workspace = true } async-trait = { workspace = true } bitvec = { workspace = true, optional = true } codec = { workspace = true } +flume = { workspace = true } futures = { workspace = true } gum = { workspace = true } hex-literal = { workspace = true } diff --git a/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs b/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs new file mode 100644 index 000000000..264670ca8 --- /dev/null +++ b/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs @@ -0,0 +1,81 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see + +//! Development Polkadot service. Adapted from `polkadot_service` crate +//! and removed un-necessary components which are not required in dev node. 
+ +use codec::Encode; +use jsonrpsee::{ + core::RpcResult, + proc_macros::rpc, + types::{ + error::{INTERNAL_ERROR_CODE, INTERNAL_ERROR_MSG}, + ErrorObjectOwned, + }, +}; + +/// This RPC interface is used to provide methods in dev mode only +#[rpc(server)] +#[jsonrpsee::core::async_trait] +pub trait DevApi { + /// Indicate the mock parachain candidate insertion to be active + #[method(name = "mock_enableParaInherentCandidate")] + async fn enable_para_inherent_candidate(&self) -> RpcResult<()>; + + /// Indicate the mock parachain candidate insertion to be disabled + #[method(name = "mock_disableParaInherentCandidate")] + async fn disable_para_inherent_candidate(&self) -> RpcResult<()>; +} + +pub struct DevRpc { + pub mock_para_inherent_channel: flume::Sender>, +} + +#[jsonrpsee::core::async_trait] +impl DevApiServer for DevRpc { + async fn enable_para_inherent_candidate(&self) -> RpcResult<()> { + let mock_para_inherent_channel = self.mock_para_inherent_channel.clone(); + // Push the message to the shared channel where it will be queued up + // to be injected in to an upcoming block. + mock_para_inherent_channel + .send_async(true.encode()) + .await + .map_err(|err| internal_err(err.to_string()))?; + + Ok(()) + } + + async fn disable_para_inherent_candidate(&self) -> RpcResult<()> { + let mock_para_inherent_channel = self.mock_para_inherent_channel.clone(); + // Push the message to the shared channel where it will be queued up + // to be injected in to an upcoming block. + mock_para_inherent_channel + .send_async(false.encode()) + .await + .map_err(|err| internal_err(err.to_string()))?; + + Ok(()) + } +} + +// This bit cribbed from frontier. 
+pub fn internal_err(message: T) -> ErrorObjectOwned { + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(message.to_string()), + ) +} diff --git a/solo-chains/node/tanssi-relay-service/src/dev_service.rs b/solo-chains/node/tanssi-relay-service/src/dev_service.rs index bd12767b2..b8c355b66 100644 --- a/solo-chains/node/tanssi-relay-service/src/dev_service.rs +++ b/solo-chains/node/tanssi-relay-service/src/dev_service.rs @@ -31,6 +31,7 @@ //! by incrementing timestamp by slot duration. use { + crate::dev_rpcs::{DevApiServer, DevRpc}, async_io::Timer, babe::{BabeBlockImport, BabeLink}, codec::{Decode, Encode}, @@ -42,7 +43,12 @@ use { polkadot_core_primitives::{AccountId, Balance, Block, Hash, Nonce}, polkadot_node_core_parachains_inherent::Error as InherentError, polkadot_overseer::Handle, - polkadot_primitives::InherentData as ParachainsInherentData, + polkadot_primitives::{ + runtime_api::ParachainHost, BackedCandidate, CandidateCommitments, CandidateDescriptor, + CollatorPair, CommittedCandidateReceipt, CompactStatement, EncodeAs, + InherentData as ParachainsInherentData, OccupiedCoreAssumption, SigningContext, + ValidityAttestation, + }, polkadot_rpc::{DenyUnsafe, RpcExtension}, polkadot_service::{ BlockT, Error, IdentifyVariant, NewFullParams, OverseerGen, SelectRelayChain, @@ -54,16 +60,24 @@ use { run_manual_seal, EngineCommand, ManualSealParams, }, sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}, + sc_keystore::Keystore, sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool}, service::{Configuration, KeystoreContainer, RpcHandlers, TaskManager}, sp_api::ProvideRuntimeApi, sp_block_builder::BlockBuilder, sp_blockchain::{HeaderBackend, HeaderMetadata}, + sp_consensus_aura::{inherents::InherentType as AuraInherentType, AURA_ENGINE_ID}, sp_consensus_babe::SlotDuration, + sp_core::{ByteArray, Pair, H256}, + sp_keystore::KeystorePtr, + sp_runtime::{traits::BlakeTwo256, DigestItem, 
RuntimeAppPublic}, std::{cmp::max, ops::Add, sync::Arc, time::Duration}, telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}, }; +// We use this key to store whether we want the para inherent mocker to be active +const PARA_INHERENT_SELECTOR_AUX_KEY: &[u8] = b"__DEV_PARA_INHERENT_SELECTOR"; + pub type FullBackend = service::TFullBackend; pub type FullClient = service::TFullClient< @@ -97,6 +111,8 @@ struct DevDeps { pub deny_unsafe: DenyUnsafe, /// Manual seal command sink pub command_sink: Option>>, + /// Channels for dev rpcs + pub dev_rpc_data: Option>>, } fn create_dev_rpc_extension( @@ -106,6 +122,7 @@ fn create_dev_rpc_extension( chain_spec, deny_unsafe, command_sink: maybe_command_sink, + dev_rpc_data: maybe_dev_rpc_data, }: DevDeps, ) -> Result> where @@ -145,15 +162,21 @@ where io.merge(ManualSeal::new(command_sink).into_rpc())?; } + if let Some(mock_para_inherent_channel) = maybe_dev_rpc_data { + io.merge( + DevRpc { + mock_para_inherent_channel, + } + .into_rpc(), + )?; + } + Ok(io) } /// We use EmptyParachainsInherentDataProvider to insert an empty parachain inherent in the block /// to satisfy runtime -struct EmptyParachainsInherentDataProvider> { - pub client: Arc, - pub parent: Hash, -} +struct EmptyParachainsInherentDataProvider; /// Copied from polkadot service just so that this code retains same structure as /// polkadot_service crate. 
@@ -165,12 +188,8 @@ struct Basics { telemetry: Option, } -impl> EmptyParachainsInherentDataProvider { - pub fn new(client: Arc, parent: Hash) -> Self { - EmptyParachainsInherentDataProvider { client, parent } - } - - pub async fn create( +impl EmptyParachainsInherentDataProvider { + pub async fn create>( client: Arc, parent: Hash, ) -> Result { @@ -189,35 +208,6 @@ impl> EmptyParachainsInherentDataProvider { } } -#[async_trait::async_trait] -impl> sp_inherents::InherentDataProvider - for EmptyParachainsInherentDataProvider -{ - async fn provide_inherent_data( - &self, - dst_inherent_data: &mut sp_inherents::InherentData, - ) -> Result<(), sp_inherents::Error> { - let inherent_data = - EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) - .await - .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?; - - dst_inherent_data.put_data( - polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER, - &inherent_data, - ) - } - - async fn try_handle_error( - &self, - _identifier: &sp_inherents::InherentIdentifier, - _error: &[u8], - ) -> Option> { - // Inherent isn't checked and can not return any error - None - } -} - /// Creates new development full node with manual seal pub fn build_full( sealing: Sealing, @@ -245,6 +235,300 @@ pub fn build_full( } } +/// We use MockParachainsInherentDataProvider to insert an parachain inherent with mocked +/// candidates +/// We detect whether any of the keys in our keystore is assigned to a core and provide +/// a mocked candidate in such core +struct MockParachainsInherentDataProvider + ProvideRuntimeApi> { + pub client: Arc, + pub parent: Hash, + pub keystore: KeystorePtr, +} + +impl + ProvideRuntimeApi> MockParachainsInherentDataProvider +where + C::Api: ParachainHost, +{ + pub fn new(client: Arc, parent: Hash, keystore: KeystorePtr) -> Self { + MockParachainsInherentDataProvider { + client, + parent, + keystore, + } + } + + pub async fn create( + client: Arc, + parent: Hash, + keystore: KeystorePtr, + ) 
-> Result { + let parent_header = match client.header(parent) { + Ok(Some(h)) => h, + Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)), + Err(err) => return Err(InherentError::Blockchain(err)), + }; + + // Strategy: + // we usually have 1 validator per core, and we usually run with --alice + // the idea is that at least alice will be assigned to one core + // if we find in the keystore the validator attached to a particular core, + // we generate a signature for the parachain assigned to that core + // To retrieve the validator keys, cal runtime api: + + // this following piece of code predicts whether the validator is assigned to a particular + // core where a candidate for a parachain needs to be created + let runtime_api = client.runtime_api(); + + // we get all validators + + // we get the current claim queue to know core availability + let claim_queue = runtime_api.claim_queue(parent).unwrap(); + + // we get the validator groups + let (groups, rotation_info) = runtime_api.validator_groups(parent).unwrap(); + + // we calculate rotation since start, which will define the core assignation + // to validators + let rotations_since_session_start = (parent_header.number + - rotation_info.session_start_block) + / rotation_info.group_rotation_frequency; + + // Get all the available keys in the keystore + let available_keys = keystore + .keys(polkadot_primitives::PARACHAIN_KEY_TYPE_ID) + .unwrap(); + + // create a slot number identical to the parent block num + let slot_number = AuraInherentType::from(u64::from(parent_header.number)); + + // create a mocked header + let parachain_mocked_header = sp_runtime::generic::Header:: { + parent_hash: Default::default(), + number: parent_header.number, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: sp_runtime::generic::Digest { + logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot_number.encode())], + }, + }; + + // retrieve availability cores + let availability_cores = 
runtime_api.availability_cores(parent).unwrap(); + + // retrieve current session_idx + let session_idx = runtime_api.session_index_for_child(parent).unwrap(); + + // retrieve all validators + let all_validators = runtime_api.validators(parent).unwrap(); + + // construct full availability bitvec + let availability_bitvec = availability_bitvec(1, availability_cores.len()); + + let signature_ctx = SigningContext { + parent_hash: parent, + session_index: session_idx, + }; + + // we generate the availability bitfield sigs + // TODO: here we assume all validator keys are able to sign with our keystore + // we need to make sure the key is there before we try to sign + // this is mostly to indicate that the erasure coding chunks where received by all val + let bitfields: Vec> = all_validators + .iter() + .enumerate() + .map(|(i, public)| { + keystore_sign( + &keystore, + availability_bitvec.clone(), + &signature_ctx, + ValidatorIndex(i as u32), + &public, + ) + .unwrap() + .unwrap() + }) + .collect(); + + // generate a random collator pair + let collator_pair = CollatorPair::generate().0; + let mut backed_cand: Vec> = vec![]; + + // iterate over every core|para pair + for (core, para) in claim_queue { + // check which group is assigned to each core + let group_assigned_to_core = + core.0 + rotations_since_session_start % groups.len() as u32; + // check validator indices associated to the core + let indices_associated_to_core = groups.get(group_assigned_to_core as usize).unwrap(); + for index in indices_associated_to_core { + // fetch validator keys + let validator_keys_to_find = all_validators.get(index.0 as usize).unwrap(); + // Iterate keys until we find an eligible one, or run out of candidates. 
+ for type_public_pair in &available_keys { + if let Ok(validator) = + polkadot_primitives::ValidatorId::from_slice(&type_public_pair) + { + // if we find the validator in keystore, we try to create a backed cand + if validator_keys_to_find == &validator { + // we work with the previous included data + let mut persisted_validation_data = runtime_api + .persisted_validation_data( + parent, + para[0], + OccupiedCoreAssumption::Included, + ) + .unwrap() + .unwrap(); + + // if we dont do this we have a backed candidate every 2 blocks + // TODO: figure out why + persisted_validation_data.relay_parent_storage_root = + parent_header.state_root; + + let persisted_validation_data_hash = persisted_validation_data.hash(); + // retrieve the validation code hash + let validation_code_hash = runtime_api + .validation_code_hash( + parent, + para[0], + OccupiedCoreAssumption::Included, + ) + .unwrap() + .unwrap(); + let pov_hash = Default::default(); + // generate a fake collator signature + let payload = polkadot_primitives::collator_signature_payload( + &parent, + ¶[0], + &persisted_validation_data_hash, + &pov_hash, + &validation_code_hash, + ); + let collator_signature = collator_pair.sign(&payload); + // generate a candidate with most of the values mocked + let candidate = CommittedCandidateReceipt:: { + descriptor: CandidateDescriptor:: { + para_id: para[0], + relay_parent: parent, + collator: collator_pair.public(), + persisted_validation_data_hash, + pov_hash, + erasure_root: Default::default(), + signature: collator_signature, + para_head: parachain_mocked_header.clone().hash(), + validation_code_hash, + }, + commitments: CandidateCommitments:: { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: parachain_mocked_header.clone().encode().into(), + processed_downward_messages: 0, + hrmp_watermark: parent_header.number, + }, + }; + let candidate_hash = candidate.hash(); + let payload = 
CompactStatement::Valid(candidate_hash); + + let signature_ctx = SigningContext { + parent_hash: parent, + session_index: session_idx, + }; + + // sign the candidate with the validator key + let signature = keystore_sign( + &keystore, + payload, + &signature_ctx, + *index, + &validator, + ) + .unwrap() + .unwrap() + .benchmark_signature(); + + // construct a validity vote + let validity_votes = vec![ValidityAttestation::Explicit(signature)]; + + // push the candidate + backed_cand.push(BackedCandidate::::new( + candidate, + validity_votes.clone(), + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; indices_associated_to_core.len()], + Some(core), + )); + } + } + } + } + } + + Ok(ParachainsInherentData { + bitfields: bitfields, + backed_candidates: backed_cand, + disputes: Vec::new(), + parent_header, + }) + } +} + +#[async_trait::async_trait] +impl + ProvideRuntimeApi> sp_inherents::InherentDataProvider + for MockParachainsInherentDataProvider +where + C::Api: ParachainHost, + C: AuxStore, +{ + async fn provide_inherent_data( + &self, + dst_inherent_data: &mut sp_inherents::InherentData, + ) -> Result<(), sp_inherents::Error> { + // fetch whether the para inherent selector has been set + let maybe_para_selector = self + .client + .get_aux(PARA_INHERENT_SELECTOR_AUX_KEY) + .expect("Should be able to query aux storage; qed"); + + let inherent_data = { + if let Some(aux) = maybe_para_selector { + // if it is true, the candidates need to be mocked + // else, we output the empty parachain inherent data provider + if aux == true.encode() { + MockParachainsInherentDataProvider::create( + self.client.clone(), + self.parent, + self.keystore.clone(), + ) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } else { + EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? 
+ } + } else { + EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } + }; + + dst_inherent_data.put_data( + polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER, + &inherent_data, + ) + } + + async fn try_handle_error( + &self, + _identifier: &sp_inherents::InherentIdentifier, + _error: &[u8], + ) -> Option> { + // Inherent isn't checked and can not return any error + None + } +} + /// We store past timestamp we created in the aux storage, which enable us to return timestamp which is increased by /// slot duration from previous timestamp or current timestamp if in reality more time is passed. fn get_next_timestamp( @@ -321,6 +605,10 @@ fn new_full< let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network); + // Create channels for mocked parachain candidates. + let (downward_mock_para_inherent_sender, downward_mock_para_inherent_receiver) = + flume::bounded::>(100); + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = service::build_network(service::BuildNetworkParams { config: &config, @@ -403,6 +691,7 @@ fn new_full< }, )), }; + let keystore_clone = keystore.clone(); let babe_config = babe_link.config(); let babe_consensus_provider = BabeConsensusDataProvider::new( @@ -418,6 +707,7 @@ fn new_full< // Need to clone it and store here to avoid moving of `client` // variable in closure below. 
let client_clone = client.clone(); + task_manager.spawn_essential_handle().spawn_blocking( "authorship_task", Some("block-authoring"), @@ -430,13 +720,30 @@ fn new_full< select_chain, create_inherent_data_providers: move |parent, ()| { let client_clone = client_clone.clone(); - + let keystore = keystore_clone.clone(); + let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone(); async move { - let parachain = - EmptyParachainsInherentDataProvider::new( - client_clone.clone(), - parent, - ); + + let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone(); + // here we only take the last one + let para_inherent_decider_messages: Vec> = downward_mock_para_inherent_receiver.drain().collect(); + + // If there is a value to be updated, we update it + if let Some(value) = para_inherent_decider_messages.last() { + client_clone + .insert_aux( + &[(PARA_INHERENT_SELECTOR_AUX_KEY, value.as_slice())], + &[], + ) + .expect("Should be able to write to aux storage; qed"); + + } + + let parachain = MockParachainsInherentDataProvider::new( + client_clone.clone(), + parent, + keystore + ); let timestamp = get_next_timestamp(client_clone, slot_duration); @@ -454,6 +761,13 @@ fn new_full< ); } + // We dont need the flume receiver if we are not a validator + let dev_rpc_data = if role.clone().is_authority() { + Some(downward_mock_para_inherent_sender) + } else { + None + }; + let rpc_extensions_builder = { let client = client.clone(); let transaction_pool = transaction_pool.clone(); @@ -468,6 +782,7 @@ fn new_full< chain_spec: chain_spec.cloned_box(), deny_unsafe, command_sink: command_sink.clone(), + dev_rpc_data: dev_rpc_data.clone(), }; create_dev_rpc_extension(deps).map_err(Into::into) @@ -630,3 +945,44 @@ fn new_partial_basics( telemetry, }) } + +use polkadot_primitives::{AvailabilityBitfield, UncheckedSigned, ValidatorId, ValidatorIndex}; +use sp_keystore::Error as KeystoreError; +fn keystore_sign( + keystore: &KeystorePtr, 
+ payload: Payload, + context: &SigningContext, + validator_index: ValidatorIndex, + key: &ValidatorId, +) -> Result>, KeystoreError> { + let data = payload_data(&payload, context); + let signature = keystore + .sr25519_sign(ValidatorId::ID, key.as_ref(), &data)? + .map(|sig| UncheckedSigned::new(payload, validator_index, sig.into())); + Ok(signature) +} + +fn payload_data( + payload: &Payload, + context: &SigningContext, +) -> Vec { + // equivalent to (`real_payload`, context).encode() + let mut out = payload.encode_as(); + out.extend(context.encode()); + out +} + +/// Create an `AvailabilityBitfield` with size `total_cores`. The first `used_cores` set to true (occupied), +/// and the remaining to false (available). +fn availability_bitvec(used_cores: usize, total_cores: usize) -> AvailabilityBitfield { + let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0]; + for i in 0..total_cores { + if i < used_cores { + bitfields.push(true); + } else { + bitfields.push(false) + } + } + + bitfields.into() +} diff --git a/solo-chains/node/tanssi-relay-service/src/lib.rs b/solo-chains/node/tanssi-relay-service/src/lib.rs index 0ad8f1d95..46a0e5030 100644 --- a/solo-chains/node/tanssi-relay-service/src/lib.rs +++ b/solo-chains/node/tanssi-relay-service/src/lib.rs @@ -17,3 +17,5 @@ pub mod chain_spec; pub mod dev_service; + +pub mod dev_rpcs; diff --git a/solo-chains/runtime/dancelight/src/lib.rs b/solo-chains/runtime/dancelight/src/lib.rs index b6a6b9a05..924118919 100644 --- a/solo-chains/runtime/dancelight/src/lib.rs +++ b/solo-chains/runtime/dancelight/src/lib.rs @@ -1304,7 +1304,7 @@ impl pallet_invulnerables::Config for Runtime { type CollatorId = ::AccountId; type CollatorIdOf = ConvertInto; type CollatorRegistration = Session; - type WeightInfo = (); + type WeightInfo = weights::pallet_invulnerables::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] type Currency = Balances; } @@ -1980,6 +1980,7 @@ mod benches { [pallet_collator_assignment, 
TanssiCollatorAssignment] [pallet_external_validators, ExternalValidators] [pallet_external_validator_slashes, ExternalValidatorSlashes] + [pallet_invulnerables, TanssiInvulnerables] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] diff --git a/solo-chains/runtime/dancelight/src/weights/mod.rs b/solo-chains/runtime/dancelight/src/weights/mod.rs index ee0098635..b67c7728a 100644 --- a/solo-chains/runtime/dancelight/src/weights/mod.rs +++ b/solo-chains/runtime/dancelight/src/weights/mod.rs @@ -23,6 +23,7 @@ pub mod pallet_conviction_voting; pub mod pallet_external_validator_slashes; pub mod pallet_external_validators; pub mod pallet_identity; +pub mod pallet_invulnerables; pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_parameters; diff --git a/solo-chains/runtime/dancelight/src/weights/pallet_invulnerables.rs b/solo-chains/runtime/dancelight/src/weights/pallet_invulnerables.rs new file mode 100644 index 000000000..009e87d1b --- /dev/null +++ b/solo-chains/runtime/dancelight/src/weights/pallet_invulnerables.rs @@ -0,0 +1,117 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see + + +//! Autogenerated weights for pallet_invulnerables +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.0 +//! 
DATE: 2024-11-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `tomasz-XPS-15-9520`, CPU: `12th Gen Intel(R) Core(TM) i7-12700H` +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dancelight-dev"), DB CACHE: 1024 + +// Executed Command: +// target/release/tanssi-relay +// benchmark +// pallet +// --execution=wasm +// --wasm-execution=compiled +// --pallet +// pallet_invulnerables +// --extrinsic +// * +// --chain=dancelight-dev +// --steps +// 50 +// --repeat +// 20 +// --template=benchmarking/frame-weight-runtime-template.hbs +// --json-file +// raw.json +// --output +// tmp/dancelight_weights/pallet_invulnerables.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weights for pallet_invulnerables using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl pallet_invulnerables::WeightInfo for SubstrateWeight { + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TanssiInvulnerables::Invulnerables` (r:1 w:1) + /// Proof: `TanssiInvulnerables::Invulnerables` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 99]`. + fn add_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `845 + b * (36 ±0)` + // Estimated: `4687 + b * (37 ±0)` + // Minimum execution time: 12_566_000 picoseconds. 
+ Weight::from_parts(17_476_246, 4687) + // Standard Error: 1_484 + .saturating_add(Weight::from_parts(64_539, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + } + /// Storage: `TanssiInvulnerables::Invulnerables` (r:1 w:1) + /// Proof: `TanssiInvulnerables::Invulnerables` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 100]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `137 + b * (32 ±0)` + // Estimated: `4687` + // Minimum execution time: 6_954_000 picoseconds. + Weight::from_parts(9_208_806, 4687) + // Standard Error: 654 + .saturating_add(Weight::from_parts(35_463, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `TanssiInvulnerables::Invulnerables` (r:1 w:0) + /// Proof: `TanssiInvulnerables::Invulnerables` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + fn new_session(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `137 + r * (32 ±0)` + // Estimated: `4687` + // Minimum execution time: 6_255_000 picoseconds. 
+ Weight::from_parts(9_361_395, 4687) + // Standard Error: 1_732 + .saturating_add(Weight::from_parts(28_444, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `TanssiInvulnerables::Invulnerables` (r:1 w:0) + /// Proof: `TanssiInvulnerables::Invulnerables` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 100]`. + fn reward_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `285 + b * (33 ±0)` + // Estimated: `4687` + // Minimum execution time: 15_199_000 picoseconds. + Weight::from_parts(17_573_236, 4687) + // Standard Error: 801 + .saturating_add(Weight::from_parts(45_127, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} \ No newline at end of file diff --git a/test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts b/test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts new file mode 100644 index 000000000..dc4ef52ea --- /dev/null +++ b/test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts @@ -0,0 +1,67 @@ +import "@tanssi/api-augment"; +import { describeSuite, customDevRpcRequest, expect, beforeAll } from "@moonwall/cli"; +import { ApiPromise, Keyring } from "@polkadot/api"; +import { jumpToSession } from "util/block"; + +describeSuite({ + id: "DTR1601", + title: "Paras inherent tests", + foundationMethods: "dev", + + testCases: ({ it, context }) => { + let polkadotJs: ApiPromise; + + beforeAll(async () => { + polkadotJs = context.polkadotJs(); + }); + + it({ + id: "E01", + title: "para candidates should trigger reward info", + test: async function () { + 
const keyring = new Keyring({ type: "sr25519" }); + const aliceStash = keyring.addFromUri("//Alice//stash"); + await context.createBlock(); + // Send RPC call to enable para inherent candidate generation + await customDevRpcRequest("mock_enableParaInherentCandidate", []); + // Since collators are not assigned until session 2, we need to go till session 2 to actually see heads being injected + await jumpToSession(context, 3); + await context.createBlock(); + + // we are still in era 0 + const validatorRewards = await context + .polkadotJs() + .query.externalValidatorsRewards.rewardPointsForEra(0); + const totalRewards = validatorRewards.total.toBigInt(); + + expect(totalRewards).to.be.greaterThan(0n); + // All of them come from alice as she is the only one validating candidates + expect(validatorRewards.individual.toHuman()[aliceStash.address]).to.be.eq(totalRewards.toString()); + }, + }); + + it({ + id: "E02", + title: "Check rewards storage clears after historyDepth", + test: async function () { + const sessionsPerEra = await polkadotJs.consts.externalValidators.sessionsPerEra; + const historyDepth = await polkadotJs.consts.externalValidatorsRewards.historyDepth; + + const currentIndex = await polkadotJs.query.session.currentIndex(); + + const targetSession = + currentIndex.toNumber() + sessionsPerEra.toNumber() * (historyDepth.toNumber() + 1); + + await jumpToSession(context, targetSession); + + const validatorRewards = await context + .polkadotJs() + .query.externalValidatorsRewards.rewardPointsForEra(0); + const totalRewards = validatorRewards.total.toBigInt(); + + // rewards should have expired + expect(totalRewards).to.be.equal(0n); + }, + }); + }, +}); diff --git a/test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts b/test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts new file mode 100644 index 000000000..b1329ec6c --- /dev/null +++ 
b/test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts @@ -0,0 +1,41 @@ +import "@tanssi/api-augment"; +import { describeSuite, customDevRpcRequest, expect, beforeAll } from "@moonwall/cli"; +import { ApiPromise } from "@polkadot/api"; +import { jumpToSession } from "util/block"; +import { getHeaderFromRelay } from "util/relayInterface.ts"; + +describeSuite({ + id: "DTR1401", + title: "Paras inherent tests", + foundationMethods: "dev", + + testCases: ({ it, context }) => { + let polkadotJs: ApiPromise; + + beforeAll(async () => { + polkadotJs = context.polkadotJs(); + }); + + it({ + id: "E01", + title: "Paras heads should be updated every block", + test: async function () { + const parasHeadGenesis = await polkadotJs.query.paras.heads(2000); + await context.createBlock(); + // Send RPC call to enable para inherent candidate generation + await customDevRpcRequest("mock_enableParaInherentCandidate", []); + // Since collators are not assigned until session 2, we need to go till session 2 to actually see heads being injected + await jumpToSession(context, 3); + await context.createBlock(); + const parasHeadAfterOneBlock = await polkadotJs.query.paras.heads(2000); + expect(parasHeadAfterOneBlock).to.not.be.eq(parasHeadGenesis); + await context.createBlock(); + // we create one more block to test we are persisting candidates every block + const parasHeadAfterTwoBlocks = await polkadotJs.query.paras.heads(2000); + expect(parasHeadAfterOneBlock).to.not.be.eq(parasHeadAfterTwoBlocks); + const header2000 = await getHeaderFromRelay(context.polkadotJs(), 2000); + expect(header2000.number.toBigInt()).to.be.equal(31n); + }, + }); + }, +}); diff --git a/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts b/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts index 91eb73304..3d289f1e0 100644 --- 
a/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts +++ b/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts @@ -78,8 +78,8 @@ describeSuite({ .signAsync(alice); await context.createBlock([addAliceFromInvulnerables]); - const sessionsPerEra = await polkadotJs.consts.externalValidators.sessionsPerEra; - const bondingPeriod = await polkadotJs.consts.externalValidatorSlashes.bondingDuration; + const sessionsPerEra = (await polkadotJs.consts.externalValidators.sessionsPerEra).toNumber(); + const bondingPeriod = (await polkadotJs.consts.externalValidatorSlashes.bondingDuration).toNumber(); const currentIndex = await polkadotJs.query.session.currentIndex(); diff --git a/test/suites/smoke-test-dancelight/test-para-inclusion.ts b/test/suites/smoke-test-dancelight/test-para-inclusion.ts new file mode 100644 index 000000000..140695f8f --- /dev/null +++ b/test/suites/smoke-test-dancelight/test-para-inclusion.ts @@ -0,0 +1,147 @@ +import { beforeAll, describeSuite, expect } from "@moonwall/cli"; +import { getBlockArray } from "@moonwall/util"; +import { ApiPromise } from "@polkadot/api"; +import { GenericExtrinsic } from "@polkadot/types"; +import { FrameSystemEventRecord } from "@polkadot/types/lookup"; +import { AnyTuple } from "@polkadot/types/types"; +import Bottleneck from "bottleneck"; + +const timePeriod = process.env.TIME_PERIOD ? 
Number(process.env.TIME_PERIOD) : 1 * 60 * 60 * 1000; +const timeout = Math.max(Math.floor(timePeriod / 12), 5000); +const hours = (timePeriod / (1000 * 60 * 60)).toFixed(2); + +type BlockFilteredRecord = { + blockNum: number; + extrinsics: GenericExtrinsic[]; + events: FrameSystemEventRecord[]; + logs; + config; + paraInherent; +}; + +describeSuite({ + id: "S21", + title: "Sample suite that only runs on Dancelight chains", + foundationMethods: "read_only", + testCases: ({ it, context, log }) => { + let api: ApiPromise; + let blockData: BlockFilteredRecord[]; + // block hash to block number + const blockNumberMap: Map = new Map(); + // block hash to collators + const collatorsMap: Map = new Map(); + + beforeAll(async () => { + api = context.polkadotJs(); + + const blockNumArray = await getBlockArray(api, timePeriod); + log(`Collecting ${hours} hours worth of authors`); + + const getBlockData = async (blockNum: number) => { + const blockHash = await api.rpc.chain.getBlockHash(blockNum); + const signedBlock = await api.rpc.chain.getBlock(blockHash); + const apiAt = await api.at(blockHash); + const config = await apiAt.query.configuration.activeConfig(); + const extrinsics = signedBlock.block.extrinsics; + + const paraInherent = extrinsics.filter((ex) => { + const { + method: { method, section }, + } = ex; + return section == "paraInherent" && method == "enter"; + }); + + const { + method: { args }, + } = paraInherent[0]; + + const arg = args[0]; + + const backedCandidates = arg.backedCandidates; + + for (const cand of backedCandidates) { + const relayParent = cand.candidate.descriptor.relayParent.toHex(); + + if (!blockNumberMap.has(relayParent)) { + const apiAtP = await api.at(relayParent); + const parentBlockNumber = await apiAtP.query.system.number(); + + blockNumberMap.set(relayParent, parentBlockNumber.toNumber()); + } + + if (!collatorsMap.has(relayParent)) { + const apiAtP = await api.at(relayParent); + const collators = ( + await 
apiAtP.query.tanssiCollatorAssignment.collatorContainerChain() + ).toJSON(); + + collatorsMap.set(relayParent, collators); + } + } + + return { + blockNum: blockNum, + extrinsics, + events: await apiAt.query.system.events(), + logs: signedBlock.block.header.digest.logs, + config, + paraInherent, + }; + }; + const limiter = new Bottleneck({ maxConcurrent: 5, minTime: 100 }); + blockData = await Promise.all(blockNumArray.map((num) => limiter.schedule(() => getBlockData(num)))); + }, timeout); + + it({ + id: "C01", + title: "Included paras valid", + test: async function () { + blockData.map(({ blockNum, config, paraInherent }) => { + // Should have exactly 1 paraInherent + expect(paraInherent.length, `Block #${blockNum}: missing paraInherent in block`).toBeGreaterThan(0); + expect(paraInherent.length, `Block #${blockNum}: duplicate paraInherent in block`).toBeLessThan(2); + + const { + method: { args }, + } = paraInherent[0]; + const arg = args[0]; + + const backedCandidates = arg.backedCandidates; + + const numBackedCandidates = backedCandidates.length; + + // assert that numBackedCandidates <= numCores + const numCores = config.schedulerParams.numCores.toNumber(); + expect( + numBackedCandidates, + `Block #${blockNum}: backed more candidates than cores available: ${numBackedCandidates} vs cores ${numCores}` + ).to.be.lessThanOrEqual(numCores); + + // Assert that each backed candidate: + // * has relayParent be at most allowedAncestryLen backwards + // * had collators assigned to it at block "relayParent" + const allowedAncestryLen = config.asyncBackingParams.allowedAncestryLen.toNumber(); + for (const cand of backedCandidates) { + const paraId = cand.candidate.descriptor.paraId.toNumber(); + const relayParent = cand.candidate.descriptor.relayParent.toHex(); + + const parentBlockNumber = blockNumberMap.get(relayParent); + + // allowedAncestryLen = 1 means that parent + 1 == current + // with allowedAncestryLen = 2, parent + allowedAncestryLen >= current + expect( 
parentBlockNumber + allowedAncestryLen, + `Block #${blockNum}: backed candidate for para id ${paraId} has too old relayParent: ${parentBlockNumber} vs current ${blockNum}` + ).to.be.greaterThanOrEqual(blockNum); + + const collators = collatorsMap.get(relayParent); + expect( + collators.containerChains[paraId], + `Block #${blockNum}: Found backed candidate for para id ${paraId}, but that para id has no collators assigned. Collator assignment: ${collators}` + ).toBeTruthy(); + } + }); + }, + }); + }, +}); diff --git a/test/util/relayInterface.ts b/test/util/relayInterface.ts index 80c92cb5f..048b3ad1a 100644 --- a/test/util/relayInterface.ts +++ b/test/util/relayInterface.ts @@ -1,9 +1,15 @@ import { ApiPromise } from "@polkadot/api"; -import type { Header, ParaId } from "@polkadot/types/interfaces"; +import type { Header, ParaId, HeadData } from "@polkadot/types/interfaces"; +import { Bytes } from "@polkadot/types-codec"; +import { TypeRegistry } from "@polkadot/types"; export async function getHeaderFromRelay(relayApi: ApiPromise, paraId: ParaId): Promise
{ // Get the latest header from relay storage const encoded = await relayApi.query.paras.heads(paraId); - const header = await relayApi.createType("Header", encoded); + const registry = new TypeRegistry(); + const headerEncoded: HeadData = await relayApi.createType("HeadData", encoded.toHex()); + const nonEncodedHeader = new Bytes(registry, headerEncoded.toU8a(true)).toHex(); + + const header = await relayApi.createType("SpRuntimeHeader", nonEncodedHeader); return header; }