diff --git a/Cargo.lock b/Cargo.lock index e9d67e6c1..309c79e88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17064,6 +17064,7 @@ dependencies = [ "async-io 1.13.0", "async-trait", "ccp-authorities-noting-inherent", + "chrono", "clap 4.5.4", "cumulus-client-cli", "cumulus-client-collator", @@ -17081,6 +17082,7 @@ dependencies = [ "dp-container-chain-genesis-data", "dp-slot-duration-runtime-api", "exit-future", + "fdlimit", "flashbox-runtime", "flume 0.10.14", "frame-benchmarking", @@ -17500,6 +17502,7 @@ dependencies = [ "async-trait", "ccp-authorities-noting-inherent", "clap 4.5.4", + "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", @@ -17558,6 +17561,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", + "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 563cc962f..9fcee947f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -330,9 +330,11 @@ tap = "1.0.1" # General (client) async-io = "1.3" async-trait = "0.1" +chrono = "0.4.31" clap = { version = "4.5.3", default-features = false, features = [ "derive" ] } core_extensions = "1.5.3" exit-future = { version = "0.2.0" } +fdlimit = "0.3.0" flume = "0.10.9" fs2 = "0.4.3" futures = { version = "0.3.1" } diff --git a/client/consensus/src/collators/lookahead.rs b/client/consensus/src/collators/lookahead.rs index 957d2e351..089978179 100644 --- a/client/consensus/src/collators/lookahead.rs +++ b/client/consensus/src/collators/lookahead.rs @@ -343,9 +343,32 @@ pub struct Params< pub authoring_duration: Duration, pub force_authoring: bool, pub cancellation_token: CancellationToken, - pub orchestrator_tx_pool: Arc, - pub orchestrator_client: Arc, - pub solochain: bool, + pub buy_core_params: BuyCoreParams, +} + +pub enum BuyCoreParams { + Orchestrator { + orchestrator_tx_pool: Arc, + orchestrator_client: Arc, + }, + Solochain { + // TODO: relay_tx_pool + }, +} + +impl Clone for BuyCoreParams { + fn clone(&self) -> Self { + match self { + 
Self::Orchestrator { + orchestrator_tx_pool, + orchestrator_client, + } => Self::Orchestrator { + orchestrator_tx_pool: orchestrator_tx_pool.clone(), + orchestrator_client: orchestrator_client.clone(), + }, + Self::Solochain {} => Self::Solochain {}, + } + } } /// Run async-backing-friendly for Tanssi Aura. @@ -637,12 +660,20 @@ where let slot = inherent_providers.slot(); let container_chain_slot_duration = (params.get_current_slot_duration)(parent_header.hash()); - let buy_core_result = if params.solochain { - // TODO: implement parathread support for solochain - log::warn!("Unimplemented: cannot buy core for parathread in solochain"); - break; - } else { - try_to_buy_core::<_, _, <::Header as HeaderT>::Number, _, CIDP, _, _>(params.para_id, aux_data, inherent_providers, ¶ms.keystore, params.orchestrator_client.clone(), params.orchestrator_tx_pool.clone(), parent_header, params.orchestrator_slot_duration, container_chain_slot_duration).await + let buy_core_result = match ¶ms.buy_core_params { + BuyCoreParams::Orchestrator { + orchestrator_client, + orchestrator_tx_pool, + } => { + try_to_buy_core::<_, _, <::Header as HeaderT>::Number, _, CIDP, _, _>(params.para_id, aux_data, inherent_providers, ¶ms.keystore, orchestrator_client.clone(), orchestrator_tx_pool.clone(), parent_header, params.orchestrator_slot_duration, container_chain_slot_duration).await + } + BuyCoreParams::Solochain { + + } => { + // TODO: implement parathread support for solochain + log::warn!("Unimplemented: cannot buy core for parathread in solochain"); + break; + } }; match buy_core_result { Ok(block_hash) => { diff --git a/client/consensus/src/mocks.rs b/client/consensus/src/mocks.rs index cdec9a1e8..1ec7358dd 100644 --- a/client/consensus/src/mocks.rs +++ b/client/consensus/src/mocks.rs @@ -16,14 +16,17 @@ use { crate::{ - collators::lookahead::Params as LookAheadParams, OrchestratorAuraWorkerAuxData, - SlotFrequency, + collators::lookahead::{BuyCoreParams, Params as LookAheadParams}, + 
OrchestratorAuraWorkerAuxData, SlotFrequency, }, async_trait::async_trait, cumulus_client_collator::service::CollatorService, cumulus_client_consensus_common::{ParachainBlockImportMarker, ValidationCodeHashProvider}, cumulus_client_consensus_proposer::Proposer as ConsensusProposer, - cumulus_primitives_core::{relay_chain::BlockId, CollationInfo, CollectCollationInfo, ParaId}, + cumulus_primitives_core::{ + relay_chain::{BlockId, ValidationCodeHash}, + CollationInfo, CollectCollationInfo, ParaId, + }, cumulus_relay_chain_interface::{ CommittedCandidateReceipt, OverseerHandle, RelayChainInterface, RelayChainResult, StorageValue, @@ -35,7 +38,10 @@ use { pallet_xcm_core_buyer_runtime_api::BuyingError, parity_scale_codec::Encode, polkadot_core_primitives::{Header as PHeader, InboundDownwardMessage, InboundHrmpMessage}, - polkadot_node_subsystem::messages::{RuntimeApiMessage, RuntimeApiRequest}, + polkadot_node_subsystem::{ + messages::{RuntimeApiMessage, RuntimeApiRequest}, + overseer, OverseerSignal, + }, polkadot_overseer::dummy::dummy_overseer_builder, polkadot_parachain_primitives::primitives::HeadData, polkadot_primitives::{ @@ -512,11 +518,6 @@ impl sc_consensus::Verifier for SealExtractorVerfier { } } -use { - cumulus_primitives_core::relay_chain::ValidationCodeHash, - polkadot_node_subsystem::{overseer, OverseerSignal}, -}; - pub struct DummyCodeHashProvider; impl ValidationCodeHashProvider for DummyCodeHashProvider { fn code_hash_at(&self, _at: PHash) -> Option { @@ -984,10 +985,11 @@ impl CollatorLookaheadTestBuilder { para_client: environ.clone().into(), sync_oracle: DummyOracle, para_backend: backend, - orchestrator_client: environ.into(), + buy_core_params: BuyCoreParams::Orchestrator { + orchestrator_client: environ.into(), + orchestrator_tx_pool: orchestrator_tx_pool.clone(), + }, orchestrator_slot_duration: SlotDuration::from_millis(SLOT_DURATION_MS), - orchestrator_tx_pool: orchestrator_tx_pool.clone(), - solochain: false, }; let (fut, 
exit_notification_receiver) = crate::collators::lookahead::run::< _, diff --git a/client/service-container-chain/Cargo.toml b/client/service-container-chain/Cargo.toml index 33e9db891..aa56274bf 100644 --- a/client/service-container-chain/Cargo.toml +++ b/client/service-container-chain/Cargo.toml @@ -20,6 +20,7 @@ log = { workspace = true } serde = { workspace = true } tokio = { workspace = true } tokio-util = { workspace = true } +url = { workspace = true } # Local ccp-authorities-noting-inherent = { workspace = true, features = [ "std" ] } @@ -70,6 +71,7 @@ sp-timestamp = { workspace = true, features = [ "std" ] } polkadot-primitives = { workspace = true } # Cumulus +cumulus-client-cli = { workspace = true } cumulus-client-collator = { workspace = true } cumulus-client-consensus-aura = { workspace = true } cumulus-client-consensus-common = { workspace = true } diff --git a/client/service-container-chain/src/cli.rs b/client/service-container-chain/src/cli.rs index 460efcad3..8aee43a15 100644 --- a/client/service-container-chain/src/cli.rs +++ b/client/service-container-chain/src/cli.rs @@ -16,12 +16,16 @@ use { crate::chain_spec::RawGenesisConfig, + cumulus_client_cli::{CollatorOptions, RelayChainMode}, dc_orchestrator_chain_interface::ContainerChainGenesisData, dp_container_chain_genesis_data::json::properties_to_map, sc_chain_spec::ChainSpec, + sc_cli::{CliConfiguration, SubstrateCli}, sc_network::config::MultiaddrWithPeerId, + sc_service::BasePath, sp_runtime::Storage, - std::{collections::BTreeMap, net::SocketAddr, path::PathBuf}, + std::{collections::BTreeMap, net::SocketAddr}, + url::Url, }; /// The `run` command used to run a container chain node. @@ -45,6 +49,69 @@ pub struct ContainerChainRunCmd { /// Keep container-chain db after changing collator assignments #[arg(long)] pub keep_db: bool, + + /// Creates a less resource-hungry node that retrieves relay chain data from an RPC endpoint. 
+ /// + /// The provided URLs should point to RPC endpoints of the relay chain. + /// This node connects to the remote nodes following the order they were specified in. If the + /// connection fails, it attempts to connect to the next endpoint in the list. + /// + /// Note: This option doesn't stop the node from connecting to the relay chain network but + /// reduces bandwidth use. + #[arg( + long, + value_parser = validate_relay_chain_url, + num_args = 0.., + alias = "relay-chain-rpc-url" + )] + pub relay_chain_rpc_urls: Vec, + + /// EXPERIMENTAL: Embed a light client for the relay chain. Only supported for full-nodes. + /// Will use the specified relay chain chainspec. + #[arg(long, conflicts_with_all = ["relay_chain_rpc_urls", "collator"])] + pub relay_chain_light_client: bool, +} + +impl ContainerChainRunCmd { + /// Create a [`NormalizedRunCmd`] which merges the `collator` cli argument into `validator` to + /// have only one. + pub fn normalize(&self) -> ContainerChainCli { + let mut new_base = self.clone(); + + new_base.base.validator = self.base.validator || self.collator; + + // Append `containers/` to base_path for this object. This is to ensure that when spawning + // a new container chain, its database is always inside the `containers` folder. + // So if the user passes `--base-path /tmp/node`, we want the ephemeral container data in + // `/tmp/node/containers`, and the persistent storage in `/tmp/node/config`. 
+ let base_path = base_path_or_default( + self.base.base_path().expect("failed to get base_path"), + &ContainerChainCli::executable_name(), + ); + + let base_path = base_path.path().join("containers"); + new_base.base.shared_params.base_path = Some(base_path); + + ContainerChainCli { + base: new_base, + preloaded_chain_spec: None, + } + } + + /// Create [`CollatorOptions`] representing options only relevant to parachain collator nodes + // Copied from polkadot-sdk/cumulus/client/cli/src/lib.rs + pub fn collator_options(&self) -> CollatorOptions { + let relay_chain_mode = match ( + self.relay_chain_light_client, + !self.relay_chain_rpc_urls.is_empty(), + ) { + (true, _) => RelayChainMode::LightClient, + (_, true) => RelayChainMode::ExternalRpc(self.relay_chain_rpc_urls.clone()), + _ => RelayChainMode::Embedded, + }; + + CollatorOptions { relay_chain_mode } + } } #[derive(Debug)] @@ -52,9 +119,6 @@ pub struct ContainerChainCli { /// The actual container chain cli object. pub base: ContainerChainRunCmd, - /// The base path that should be used by the container chain. - pub base_path: PathBuf, - /// The ChainSpecs that this struct can initialize. This starts empty and gets filled /// by calling preload_chain_spec_file. 
pub preloaded_chain_spec: Option>, @@ -64,7 +128,6 @@ impl Clone for ContainerChainCli { fn clone(&self) -> Self { Self { base: self.base.clone(), - base_path: self.base_path.clone(), preloaded_chain_spec: self.preloaded_chain_spec.as_ref().map(|x| x.cloned_box()), } } @@ -76,13 +139,27 @@ impl ContainerChainCli { para_config: &sc_service::Configuration, container_chain_args: impl Iterator, ) -> Self { - let base_path = para_config.base_path.path().join("containers"); + let mut base: ContainerChainRunCmd = clap::Parser::parse_from(container_chain_args); - Self { - base_path, - base: clap::Parser::parse_from(container_chain_args), - preloaded_chain_spec: None, + // Copy some parachain args into container chain args + + // If the container chain args have no --wasmtime-precompiled flag, use the same as the orchestrator + if base.base.import_params.wasmtime_precompiled.is_none() { + base.base + .import_params + .wasmtime_precompiled + .clone_from(¶_config.wasmtime_precompiled); + } + + // Set container base path to the same value as orchestrator base_path. + // "containers" is appended in `base.normalize()` + if base.base.shared_params.base_path.is_some() { + log::warn!("Container chain --base-path is being ignored"); } + let base_path = para_config.base_path.path().to_owned(); + base.base.shared_params.base_path = Some(base_path); + + base.normalize() } pub fn chain_spec_from_genesis_data( @@ -249,10 +326,7 @@ impl sc_cli::CliConfiguration for ContainerChainCli { } fn base_path(&self) -> sc_cli::Result> { - Ok(self - .shared_params() - .base_path()? 
- .or_else(|| Some(self.base_path.clone().into()))) + self.shared_params().base_path() } fn rpc_addr(&self, default_listen_port: u16) -> sc_cli::Result> { @@ -366,3 +440,26 @@ fn parse_container_chain_id_str(id: &str) -> std::result::Result { }) .ok_or_else(|| format!("load_spec called with invalid id: {:?}", id)) } + +// Copied from polkadot-sdk/cumulus/client/cli/src/lib.rs +fn validate_relay_chain_url(arg: &str) -> Result { + let url = Url::parse(arg).map_err(|e| e.to_string())?; + + let scheme = url.scheme(); + if scheme == "ws" || scheme == "wss" { + Ok(url) + } else { + Err(format!( + "'{}' URL scheme not supported. Only websocket RPC is currently supported", + url.scheme() + )) + } +} + +/// Returns the value of `base_path` or the default_path if it is None +pub(crate) fn base_path_or_default( + base_path: Option, + executable_name: &String, +) -> BasePath { + base_path.unwrap_or_else(|| BasePath::from_project("", "", executable_name)) +} diff --git a/client/service-container-chain/src/service.rs b/client/service-container-chain/src/service.rs index 6b88723a0..d1112c3b8 100644 --- a/client/service-container-chain/src/service.rs +++ b/client/service-container-chain/src/service.rs @@ -52,7 +52,7 @@ use { substrate_prometheus_endpoint::Registry, tc_consensus::{ collators::lookahead::{ - self as lookahead_tanssi_aura, Params as LookaheadTanssiAuraParams, + self as lookahead_tanssi_aura, BuyCoreParams, Params as LookaheadTanssiAuraParams, }, OrchestratorAuraWorkerAuxData, }, @@ -330,8 +330,12 @@ fn start_consensus_container( u64::try_from(relay_slot_ms).expect("relay chain slot duration overflows u64"), ) } else { - cumulus_client_consensus_aura::slot_duration(&*orchestrator_client) - .expect("start_consensus_container: slot duration should exist") + cumulus_client_consensus_aura::slot_duration( + orchestrator_client + .as_deref() + .expect("solochain is false, orchestrator_client must be Some"), + ) + .expect("start_consensus_container: slot duration should 
exist") }; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -365,6 +369,16 @@ fn start_consensus_container( .map(polkadot_primitives::ValidationCode) .map(|c| c.hash()) }; + let buy_core_params = if solochain { + BuyCoreParams::Solochain {} + } else { + BuyCoreParams::Orchestrator { + orchestrator_tx_pool: orchestrator_tx_pool + .expect("solochain is false, orchestrator_tx_pool must be Some"), + orchestrator_client: orchestrator_client + .expect("solochain is false, orchestrator_client must be Some"), + } + }; let params = LookaheadTanssiAuraParams { get_current_slot_duration: move |block_hash| { @@ -481,7 +495,9 @@ fn start_consensus_container( })?; let authorities = tc_consensus::authorities::( - orchestrator_client_for_cidp.as_ref(), + orchestrator_client_for_cidp + .as_ref() + .expect("solochain is false, orchestrator_client must be Some"), &latest_header.hash(), para_id, ); @@ -499,7 +515,9 @@ fn start_consensus_container( ); let slot_freq = tc_consensus::min_slot_freq::( - orchestrator_client_for_cidp.as_ref(), + orchestrator_client_for_cidp + .as_ref() + .expect("solochain is false, orchestrator_client must be Some"), &latest_header.hash(), para_id, ); @@ -531,9 +549,7 @@ fn start_consensus_container( code_hash_provider, // This cancellation token is no-op as it is not shared outside. 
cancellation_token: CancellationToken::new(), - orchestrator_tx_pool, - orchestrator_client, - solochain, + buy_core_params, }; let (fut, _exit_notification_receiver) = diff --git a/client/service-container-chain/src/spawner.rs b/client/service-container-chain/src/spawner.rs index 4c9366d92..76a809a9e 100644 --- a/client/service-container-chain/src/spawner.rs +++ b/client/service-container-chain/src/spawner.rs @@ -115,9 +115,10 @@ pub struct ContainerChainSpawnParams { #[derive(Clone)] pub struct CollationParams { pub collator_key: CollatorPair, - pub orchestrator_tx_pool: Arc>, - pub orchestrator_client: Arc, + pub orchestrator_tx_pool: Option>>, + pub orchestrator_client: Option>, pub orchestrator_para_id: ParaId, + /// If this is `false`, then `orchestrator_tx_pool` and `orchestrator_client` must be `Some`. pub solochain: bool, } diff --git a/container-chains/nodes/simple/src/command.rs b/container-chains/nodes/simple/src/command.rs index fbf171eeb..c6326a9cb 100644 --- a/container-chains/nodes/simple/src/command.rs +++ b/container-chains/nodes/simple/src/command.rs @@ -528,29 +528,13 @@ fn rpc_provider_mode(cli: Cli, profile_id: u64) -> Result<()> { // Spawn assignment watcher { - let mut container_chain_cli = ContainerChainCli::new( + let container_chain_cli = ContainerChainCli::new( &config, [ContainerChainCli::executable_name()] .iter() .chain(cli.container_chain_args().iter()), ); - // If the container chain args have no --wasmtime-precompiled flag, use the same as the orchestrator - if container_chain_cli - .base - .base - .import_params - .wasmtime_precompiled - .is_none() - { - container_chain_cli - .base - .base - .import_params - .wasmtime_precompiled - .clone_from(&config.wasmtime_precompiled); - } - log::info!("Container chain CLI: {container_chain_cli:?}"); let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) diff --git a/node/Cargo.toml b/node/Cargo.toml index 428ad2083..f829ad11f 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ 
-13,8 +13,10 @@ workspace = true [dependencies] async-io = { workspace = true } async-trait = { workspace = true } +chrono = { workspace = true } clap = { workspace = true, features = [ "derive" ] } exit-future = { workspace = true } +fdlimit = { workspace = true } flume = { workspace = true } fs2 = { workspace = true } futures = { workspace = true } @@ -79,7 +81,6 @@ sp-api = { workspace = true, features = [ "std" ] } sp-block-builder = { workspace = true } sp-blockchain = { workspace = true } sp-consensus = { workspace = true } - sp-consensus-aura = { workspace = true } sp-consensus-slots = { workspace = true } sp-core = { workspace = true, features = [ "std" ] } @@ -87,6 +88,7 @@ sp-inherents = { workspace = true, features = [ "std" ] } sp-io = { workspace = true, features = [ "std" ] } sp-keystore = { workspace = true, features = [ "std" ] } sp-offchain = { workspace = true, features = [ "std" ] } +sp-panic-handler = { workspace = true } sp-runtime = { workspace = true, features = [ "std" ] } sp-session = { workspace = true, features = [ "std" ] } sp-state-machine = { workspace = true, features = [ "std" ] } diff --git a/node/src/cli.rs b/node/src/cli.rs index e644fe9ee..a01edcfb4 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -18,6 +18,7 @@ use { node_common::service::Sealing, sc_cli::{CliConfiguration, NodeKeyParams, SharedParams}, std::path::PathBuf, + tc_service_container_chain::cli::ContainerChainRunCmd, }; /// Sub-commands supported by the collator. @@ -63,10 +64,36 @@ pub enum Subcommand { /// Precompile the WASM runtime into native code PrecompileWasm(sc_cli::PrecompileWasmCmd), + + /// Solochain collator mode + SoloChain(SoloChainCmd), +} + +/// The `build-spec` command used to build a specification. +#[derive(Debug, clap::Parser)] +#[group(skip)] +pub struct SoloChainCmd { + #[command(flatten)] + pub run: ContainerChainRunCmd, + + /// Disable automatic hardware benchmarks. 
+ /// + /// By default these benchmarks are automatically ran at startup and measure + /// the CPU speed, the memory bandwidth and the disk speed. + /// + /// The results are then printed out in the logs, and also sent as part of + /// telemetry, if telemetry is enabled. + #[arg(long)] + pub no_hardware_benchmarks: bool, + + /// Relay chain arguments + #[arg(raw = true)] + pub relay_chain_args: Vec, } /// The `build-spec` command used to build a specification. #[derive(Debug, Clone, clap::Parser)] +#[group(skip)] pub struct BuildSpecCmd { #[clap(flatten)] pub base: sc_cli::BuildSpecCmd, @@ -123,10 +150,6 @@ pub struct RunCmd { #[arg(long)] pub dev_service: bool, - /// Enable collators to run against a solo-chain such as Starlight - #[arg(long)] - pub solo_chain: bool, - /// When blocks should be sealed in the dev service. /// /// Options are "instant", "manual", or timer interval in milliseconds diff --git a/node/src/command.rs b/node/src/command.rs index fa1ac9dd0..54122b773 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -40,6 +40,8 @@ use { tc_service_container_chain::{chain_spec::RawChainSpec, cli::ContainerChainCli}, }; +pub mod solochain; + fn load_spec( id: &str, para_id: Option, @@ -338,6 +340,74 @@ pub fn run() -> Result<()> { )) }) } + Some(Subcommand::SoloChain(cmd)) => { + // Cannot use create_configuration function because that needs a chain spec. + // So write our own `create_runner` function that doesn't need chain spec. + let container_chain_cli = cmd.run.normalize(); + let runner = solochain::create_runner(&container_chain_cli)?; + + // The expected usage is + // `tanssi-node solochain --flag` + // So `cmd` stores the flags from after `solochain`, and `cli` has the flags from between + // `tanssi-node` and `solo-chain`. We are ignoring the flags from `cli` intentionally. + // Would be nice to error if the user passes any flag there, but it's not easy to detect. 
+ + // Zombienet appends a --chain flag after "solo-chain" subcommand, which is ignored, so it's fine, + // but warn users that this is not expected here. + // We cannot do this before create_runner because logging is not setup there yet. + if container_chain_cli.base.base.shared_params.chain.is_some() { + log::warn!( + "Ignoring --chain argument: solochain mode does only need the relay chain-spec" + ); + } + + let collator_options = container_chain_cli.base.collator_options(); + + runner.run_node_until_exit(|config| async move { + let containers_base_path = container_chain_cli + .base + .base + .shared_params + .base_path + .as_ref() + .expect("base_path is always set"); + let hwbench = (!cmd.no_hardware_benchmarks) + .then_some(Some(containers_base_path).map(|database_path| { + let _ = std::fs::create_dir_all(database_path); + sc_sysinfo::gather_hwbench(Some(database_path)) + })) + .flatten(); + + let polkadot_cli = solochain::relay_chain_cli_new( + &config, + [RelayChainCli::executable_name()] + .iter() + .chain(cmd.relay_chain_args.iter()), + ); + let tokio_handle = config.tokio_handle.clone(); + let polkadot_config = + SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) + .map_err(|err| format!("Relay chain argument error: {}", err))?; + + info!( + "Is collating: {}", + if config.role.is_authority() { + "yes" + } else { + "no" + } + ); + + crate::service::start_solochain_node( + polkadot_config, + container_chain_cli, + collator_options, + hwbench, + ) + .await + .map_err(Into::into) + }) + } None => { let runner = cli.create_runner(&cli.run.normalize())?; let collator_options = cli.run.collator_options(); @@ -376,22 +446,6 @@ pub fn run() -> Result<()> { SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) .map_err(|err| format!("Relay chain argument error: {}", err))?; - let solo_chain = cli.run.solo_chain; - if solo_chain { - // We need to bake in some container-chain args - let container_chain_cli = 
ContainerChainCli::new( - &config, - [ContainerChainCli::executable_name()].iter().chain(cli.container_chain_args().iter()), - ); - let tokio_handle = config.tokio_handle.clone(); - let container_chain_config = (container_chain_cli, tokio_handle); - - return crate::service::start_solochain_node(config, polkadot_config, container_chain_config, collator_options, hwbench) - .await - .map(|r| r.0) - .map_err(Into::into); - } - let parachain_account = AccountIdConversion::::into_account_truncating(&id); diff --git a/node/src/command/solochain.rs b/node/src/command/solochain.rs new file mode 100644 index 000000000..30b658abc --- /dev/null +++ b/node/src/command/solochain.rs @@ -0,0 +1,420 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see + +//! 
Helper functions used to implement solochain collator + +use { + crate::cli::{Cli, RelayChainCli}, + futures::FutureExt, + jsonrpsee::server::BatchRequestConfig, + log::{info, warn}, + sc_chain_spec::{ChainType, GenericChainSpec, NoExtension}, + sc_cli::{CliConfiguration, DefaultConfigurationValues, Signals, SubstrateCli}, + sc_network::config::{NetworkBackendType, NetworkConfiguration, TransportConfig}, + sc_network_common::role::Role, + sc_service::{ + config::KeystoreConfig, BasePath, BlocksPruning, Configuration, DatabaseSource, TaskManager, + }, + sc_tracing::logging::LoggerBuilder, + std::{ + future::Future, + num::NonZeroUsize, + path::{Path, PathBuf}, + time::Duration, + }, + tc_service_container_chain::cli::ContainerChainCli, +}; + +/// Alternative to [Configuration] struct used in solochain context. +pub struct SolochainConfig { + pub tokio_handle: tokio::runtime::Handle, + pub base_path: BasePath, + pub network_node_name: String, + pub role: Role, + pub relay_chain: String, +} + +/// Alternative to [Runner](sc_cli::Runner) struct used in solochain context. +pub struct SolochainRunner { + config: SolochainConfig, + tokio_runtime: tokio::runtime::Runtime, + signals: Signals, +} + +impl SolochainRunner { + /// Log information about the node itself. 
+ /// + /// # Example: + /// + /// ```text + /// 2020-06-03 16:14:21 Substrate Node + /// 2020-06-03 16:14:21 ✌️ version 2.0.0-rc3-f4940588c-x86_64-linux-gnu + /// 2020-06-03 16:14:21 ❤️ by Parity Technologies , 2017-2020 + /// 2020-06-03 16:14:21 📋 Chain specification: Flaming Fir + /// 2020-06-03 16:14:21 🏷 Node name: jolly-rod-7462 + /// 2020-06-03 16:14:21 👤 Role: FULL + /// 2020-06-03 16:14:21 💾 Database: RocksDb at /tmp/c/chains/flamingfir7/db + /// 2020-06-03 16:14:21 ⛓ Native runtime: node-251 (substrate-node-1.tx1.au10) + /// ``` + fn print_node_infos(&self) { + use chrono::offset::Local; + use chrono::Datelike; + type C = ContainerChainCli; + info!("{}", C::impl_name()); + info!("✌️ version {}", C::impl_version()); + info!( + "❤️ by {}, {}-{}", + C::author(), + C::copyright_start_year(), + Local::now().year() + ); + // No chain spec + //info!("📋 Chain specification: {}", config.chain_spec.name()); + info!("🏷 Node name: {}", self.config.network_node_name); + info!("👤 Role: {}", self.config.role); + info!( + "💾 Database: {} at {}", + // Container chains only support paritydb + "ParityDb", + // Print base path instead of db path because each container will have its own db in a + // different subdirectory. + self.config.base_path.path().display(), + ); + } + + /// A helper function that runs a node with tokio and stops if the process receives the signal + /// `SIGTERM` or `SIGINT`. + pub fn run_node_until_exit( + self, + initialize: impl FnOnce(SolochainConfig) -> F, + ) -> std::result::Result<(), E> + where + F: Future>, + E: std::error::Error + Send + Sync + 'static + From, + { + self.print_node_infos(); + + let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?; + + let res = self + .tokio_runtime + .block_on(self.signals.run_until_signal(task_manager.future().fuse())); + // We need to drop the task manager here to inform all tasks that they should shut down. 
+ // + // This is important to be done before we instruct the tokio runtime to shutdown. Otherwise + // the tokio runtime will wait the full 60 seconds for all tasks to stop. + let task_registry = task_manager.into_task_registry(); + + // Give all futures 60 seconds to shutdown, before tokio "leaks" them. + let shutdown_timeout = Duration::from_secs(60); + self.tokio_runtime.shutdown_timeout(shutdown_timeout); + + let running_tasks = task_registry.running_tasks(); + + if !running_tasks.is_empty() { + log::error!("Detected running(potentially stalled) tasks on shutdown:"); + running_tasks.iter().for_each(|(task, count)| { + let instances_desc = if *count > 1 { + format!("with {} instances ", count) + } else { + "".to_string() + }; + + if task.is_default_group() { + log::error!( + "Task \"{}\" was still running {}after waiting {} seconds to finish.", + task.name, + instances_desc, + shutdown_timeout.as_secs(), + ); + } else { + log::error!( + "Task \"{}\" (Group: {}) was still running {}after waiting {} seconds to finish.", + task.name, + task.group, + instances_desc, + shutdown_timeout.as_secs(), + ); + } + }); + } + + res.map_err(Into::into) + } +} + +/// Equivalent to [Cli::create_runner] +pub fn create_runner, DVC: DefaultConfigurationValues>( + command: &T, +) -> sc_cli::Result { + let tokio_runtime = sc_cli::build_runtime()?; + + // `capture` needs to be called in a tokio context. + // Also capture them as early as possible. 
+ let signals = tokio_runtime.block_on(async { Signals::capture() })?; + + init_cmd(command, &Cli::support_url(), &Cli::impl_version())?; + + let base_path = command.base_path()?.unwrap(); + let network_node_name = command.node_name()?; + let is_dev = command.is_dev()?; + let role = command.role(is_dev)?; + // This relay chain id is only used when the relay chain args have no `--chain` value + // TODO: check if this works with an external relay rpc / light client + let relay_chain_id = "starlight_local_testnet".to_string(); + + let config = SolochainConfig { + tokio_handle: tokio_runtime.handle().clone(), + base_path, + network_node_name, + role, + relay_chain: relay_chain_id, + }; + + Ok(SolochainRunner { + config, + tokio_runtime, + signals, + }) +} + +/// The recommended open file descriptor limit to be configured for the process. +const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000; + +/// Equivalent to [CliConfiguration::init] +fn init_cmd, DVC: DefaultConfigurationValues>( + this: &T, + support_url: &String, + impl_version: &String, +) -> sc_cli::Result<()> { + sp_panic_handler::set(support_url, impl_version); + + let mut logger = LoggerBuilder::new(this.log_filters()?); + logger + .with_log_reloading(this.enable_log_reloading()?) + .with_detailed_output(this.detailed_log_output()?); + + if let Some(tracing_targets) = this.tracing_targets()? { + let tracing_receiver = this.tracing_receiver()?; + logger.with_profiling(tracing_receiver, tracing_targets); + } + + if this.disable_log_color()? { + logger.with_colors(false); + } + + logger.init()?; + + match fdlimit::raise_fd_limit() { + Ok(fdlimit::Outcome::LimitRaised { to, .. }) => { + if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + warn!( + "Low open file descriptor limit configured for the process. 
\ + Current value: {:?}, recommended value: {:?}.", + to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + } + } + Ok(fdlimit::Outcome::Unsupported) => { + // Unsupported platform (non-Linux) + } + Err(error) => { + warn!( + "Failed to configure file descriptor limit for the process: \ + {}, recommended value: {:?}.", + error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + } + } + + Ok(()) +} + +/// Equivalent to [RelayChainCli::new] +pub fn relay_chain_cli_new<'a>( + config: &SolochainConfig, + relay_chain_args: impl Iterator, +) -> RelayChainCli { + let base_path = config.base_path.path().join("polkadot"); + + RelayChainCli { + base_path, + chain_id: Some(config.relay_chain.clone()), + base: clap::Parser::parse_from(relay_chain_args), + } +} + +/// Create a dummy [Configuration] that should only be used as input to polkadot-sdk functions that +/// take this struct as input but only use one field of it. +/// This is needed because [Configuration] does not implement [Default]. +pub fn dummy_config(tokio_handle: tokio::runtime::Handle, base_path: BasePath) -> Configuration { + Configuration { + impl_name: "".to_string(), + impl_version: "".to_string(), + role: Role::Full, + tokio_handle, + transaction_pool: Default::default(), + network: NetworkConfiguration { + net_config_path: None, + listen_addresses: vec![], + public_addresses: vec![], + boot_nodes: vec![], + node_key: Default::default(), + default_peers_set: Default::default(), + default_peers_set_num_full: 0, + client_version: "".to_string(), + node_name: "".to_string(), + transport: TransportConfig::MemoryOnly, + max_parallel_downloads: 0, + max_blocks_per_request: 0, + sync_mode: Default::default(), + enable_dht_random_walk: false, + allow_non_globals_in_dht: false, + kademlia_disjoint_query_paths: false, + kademlia_replication_factor: NonZeroUsize::new(20).unwrap(), + ipfs_server: false, + yamux_window_size: None, + network_backend: NetworkBackendType::Libp2p, + }, + keystore: KeystoreConfig::InMemory, + 
database: DatabaseSource::ParityDb { + path: Default::default(), + }, + trie_cache_maximum_size: None, + state_pruning: None, + blocks_pruning: BlocksPruning::KeepAll, + chain_spec: Box::new( + GenericChainSpec::<NoExtension>::builder(Default::default(), NoExtension::None) + .with_name("test") + .with_id("test_id") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(Default::default()) + .build(), + ), + wasm_method: Default::default(), + wasmtime_precompiled: None, + wasm_runtime_overrides: None, + rpc_addr: None, + rpc_max_connections: 0, + rpc_cors: None, + rpc_methods: Default::default(), + rpc_max_request_size: 0, + rpc_max_response_size: 0, + rpc_id_provider: None, + rpc_max_subs_per_conn: 0, + rpc_port: 0, + rpc_message_buffer_capacity: 0, + rpc_batch_config: BatchRequestConfig::Disabled, + rpc_rate_limit: None, + rpc_rate_limit_whitelisted_ips: vec![], + rpc_rate_limit_trust_proxy_headers: false, + prometheus_config: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: Default::default(), + force_authoring: false, + disable_grandpa: false, + dev_key_seed: None, + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 0, + announce_block: false, + data_path: Default::default(), + base_path, + informant_output_format: Default::default(), + runtime_cache_size: 0, + } +} + +/// Returns the default path for configuration directory based on the chain_spec +pub(crate) fn build_solochain_config_dir(base_path: &PathBuf) -> PathBuf { + // base_path: Collator1000-01/data/containers + // config_dir: Collator1000-01/data/config + let mut base_path = base_path.clone(); + base_path.pop(); + + base_path.join("config") +} + +pub fn keystore_config( + keystore_params: Option<&sc_cli::KeystoreParams>, + config_dir: &PathBuf, +) -> sc_cli::Result<KeystoreConfig> { + keystore_params + .map(|x| x.keystore_config(config_dir)) + .unwrap_or_else(|| Ok(KeystoreConfig::InMemory)) +} + +/// Get the zombienet keystore path from the 
solochain collator keystore. +fn zombienet_keystore_path(keystore: &KeystoreConfig) -> PathBuf { + let keystore_path = keystore.path().unwrap(); + let mut zombienet_path = keystore_path.to_owned(); + // Collator1000-01/data/config/keystore/ + zombienet_path.pop(); + // Collator1000-01/data/config/ + zombienet_path.pop(); + // Collator1000-01/data/ + zombienet_path.push("chains/simple_container_2000/keystore/"); + // Collator1000-01/data/chains/simple_container_2000/keystore/ + + zombienet_path +} + +/// When running under zombienet, collator keys are injected in a different folder from what we +/// expect. This function will check if the zombienet folder exists, and if so, copy all the keys +/// from there into the expected folder. +pub fn copy_zombienet_keystore(keystore: &KeystoreConfig) -> std::io::Result<()> { + let keystore_path = keystore.path().unwrap(); + let zombienet_path = zombienet_keystore_path(keystore); + + if zombienet_path.exists() { + // Copy to keystore folder + let mut files_copied = 0; + copy_dir_all(zombienet_path, keystore_path, &mut files_copied)?; + log::info!("Copied {} keys from zombienet keystore", files_copied); + + Ok(()) + } else { + // Zombienet folder does not exist, assume we are not running under zombienet + Ok(()) + } +} + +/// Equivalent to `cp -r src/* dst` +// https://stackoverflow.com/a/65192210 +fn copy_dir_all( + src: impl AsRef<Path>, + dst: impl AsRef<Path>, + files_copied: &mut u32, +) -> std::io::Result<()> { + use std::fs; + fs::create_dir_all(&dst)?; + for entry in fs::read_dir(src)? { + let entry = entry?; + let ty = entry.file_type()?; + if ty.is_dir() { + copy_dir_all( + entry.path(), + dst.as_ref().join(entry.file_name()), + files_copied, + )?; + } else { + fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?; + *files_copied += 1; + } + } + Ok(()) +} diff --git a/node/src/service.rs b/node/src/service.rs index 1395c2b5a..cc4e8658f 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -17,6 +17,9 @@ //! 
Service and ServiceFactory implementation. Specialized wrapper over substrate service. use { + crate::command::solochain::{ + build_solochain_config_dir, copy_zombienet_keystore, dummy_config, keystore_config, + }, cumulus_client_cli::CollatorOptions, cumulus_client_collator::service::CollatorService, cumulus_client_consensus_proposer::Proposer, @@ -50,13 +53,15 @@ use { polkadot_cli::ProvideRuntimeApi, polkadot_parachain_primitives::primitives::HeadData, polkadot_service::Handle, + sc_cli::CliConfiguration, sc_client_api::{ AuxStore, Backend as BackendT, BlockchainEvents, HeaderBackend, UsageProvider, }, sc_consensus::BasicQueue, sc_network::NetworkBlock, + sc_network_common::role::Role, sc_network_sync::SyncingService, - sc_service::{Configuration, SpawnTaskHandle, TFullBackend, TaskManager}, + sc_service::{Configuration, KeystoreContainer, SpawnTaskHandle, TFullBackend, TaskManager}, sc_telemetry::TelemetryHandle, sc_transaction_pool::FullPool, sp_api::StorageProof, @@ -68,7 +73,7 @@ use { std::{pin::Pin, sync::Arc, time::Duration}, tc_consensus::{ collators::lookahead::{ - self as lookahead_tanssi_aura, Params as LookaheadTanssiAuraParams, + self as lookahead_tanssi_aura, BuyCoreParams, Params as LookaheadTanssiAuraParams, }, OnDemandBlockProductionApi, OrchestratorAuraWorkerAuxData, TanssiAuthorityAssignmentApi, }, @@ -236,30 +241,12 @@ pub fn import_queue( async fn start_node_impl( orchestrator_config: Configuration, polkadot_config: Configuration, - mut container_chain_config: Option<(ContainerChainCli, tokio::runtime::Handle)>, + container_chain_config: Option<(ContainerChainCli, tokio::runtime::Handle)>, collator_options: CollatorOptions, para_id: ParaId, hwbench: Option<sc_sysinfo::HwBench>, ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> { let parachain_config = prepare_node_config(orchestrator_config); - if let Some((container_chain_cli, _)) = &mut container_chain_config { - // If the container chain args have no --wasmtime-precompiled flag, use the same as the 
orchestrator - if container_chain_cli - .base - .base - .import_params - .wasmtime_precompiled - .is_none() - { - container_chain_cli - .base - .base - .import_params - .wasmtime_precompiled - .clone_from(&parachain_config.wasmtime_precompiled); - } - } - let chain_type: sc_chain_spec::ChainType = parachain_config.chain_spec.chain_type(); let relay_chain = crate::chain_spec::Extensions::try_get(&*parachain_config.chain_spec) .map(|e| e.relay_chain.clone()) .ok_or("Could not find relay_chain extension in chain-spec.")?; @@ -453,8 +440,8 @@ async fn start_node_impl( data_preserver: false, collation_params: if validator { Some(spawner::CollationParams { - orchestrator_client: orchestrator_client.clone(), - orchestrator_tx_pool, + orchestrator_client: Some(orchestrator_client.clone()), + orchestrator_tx_pool: Some(orchestrator_tx_pool), orchestrator_para_id: para_id, collator_key: collator_key .expect("there should be a collator key if we're a validator"), @@ -549,6 +536,10 @@ fn start_consensus_orchestrator( }; let cancellation_token = CancellationToken::new(); + let buy_core_params = BuyCoreParams::Orchestrator { + orchestrator_tx_pool, + orchestrator_client: client.clone(), + }; let params = LookaheadTanssiAuraParams { get_current_slot_duration: move |block_hash| { @@ -630,7 +621,7 @@ fn start_consensus_orchestrator( } }, block_import, - para_client: client.clone(), + para_client: client, relay_client: relay_chain_interface, sync_oracle, keystore, @@ -646,9 +637,7 @@ fn start_consensus_orchestrator( code_hash_provider, para_backend: backend, cancellation_token: cancellation_token.clone(), - orchestrator_tx_pool, - orchestrator_client: client, - solochain: false, + buy_core_params, }; let (fut, exit_notification_receiver) = @@ -682,69 +671,76 @@ pub async fn start_parachain_node( /// Start a solochain node. 
pub async fn start_solochain_node( - // Parachain config not used directly, but we need it to derive the default values for some container_config args - orchestrator_config: Configuration, polkadot_config: Configuration, - mut container_chain_config: (ContainerChainCli, tokio::runtime::Handle), + container_chain_cli: ContainerChainCli, collator_options: CollatorOptions, hwbench: Option<sc_sysinfo::HwBench>, -) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> { +) -> sc_service::error::Result<TaskManager> { + let tokio_handle = polkadot_config.tokio_handle.clone(); let orchestrator_para_id = Default::default(); - let parachain_config = prepare_node_config(orchestrator_config); - { - let (container_chain_cli, _) = &mut container_chain_config; - // If the container chain args have no --wasmtime-precompiled flag, use the same as the orchestrator - if container_chain_cli - .base - .base - .import_params - .wasmtime_precompiled - .is_none() - { - container_chain_cli - .base - .base - .import_params - .wasmtime_precompiled - .clone_from(&parachain_config.wasmtime_precompiled); - } - } - - let chain_type: sc_chain_spec::ChainType = parachain_config.chain_spec.chain_type(); - let relay_chain = crate::chain_spec::Extensions::try_get(&*parachain_config.chain_spec) - .map(|e| e.relay_chain.clone()) - .ok_or("Could not find relay_chain extension in chain-spec.")?; - // Channel to send messages to start/stop container chains - let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel(); - - // Create a `NodeBuilder` which helps setup parachain nodes common systems. 
- let mut node_builder = NodeConfig::new_builder(&parachain_config, hwbench.clone())?; - - let (_block_import, import_queue) = import_queue(&parachain_config, &node_builder); - - let (relay_chain_interface, collator_key) = node_builder - .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone()) - .await?; - - let validator = parachain_config.role.is_authority(); + let chain_type = polkadot_config.chain_spec.chain_type().clone(); + let relay_chain = polkadot_config.chain_spec.id().to_string(); + + let base_path = container_chain_cli + .base + .base + .shared_params + .base_path + .as_ref() + .expect("base_path is always set"); + let config_dir = build_solochain_config_dir(&base_path); + let keystore = keystore_config(container_chain_cli.keystore_params(), &config_dir) + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + // Instead of putting keystore in + // Collator1000-01/data/chains/simple_container_2000/keystore + // We put it in + // Collator1000-01/data/config/keystore + // And same for "network" folder + // But zombienet will put the keys in the old path, so we need to manually copy it if we + // are running under zombienet + copy_zombienet_keystore(&keystore)?; + + let keystore_container = KeystoreContainer::new(&keystore)?; + + // No metrics so no prometheus registry + let prometheus_registry = None; + let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?; + + // Each container chain will spawn its own telemetry + let telemetry_worker_handle = None; + + // Dummy parachain config only needed because `build_relay_chain_interface` needs to know if we + // are collators or not + let validator = container_chain_cli.base.collator; + let mut dummy_parachain_config = dummy_config( + polkadot_config.tokio_handle.clone(), + polkadot_config.base_path.clone(), + ); + dummy_parachain_config.role = if validator { + Role::Authority + } else { + Role::Full + }; + let (relay_chain_interface, 
collator_key) = + cumulus_client_service::build_relay_chain_interface( + polkadot_config, + &dummy_parachain_config, + telemetry_worker_handle.clone(), + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; log::info!("start_solochain_node: is validator? {}", validator); - let node_builder = node_builder - .build_cumulus_network::<_, sc_network::NetworkWorker<_, _>>( - ¶chain_config, - orchestrator_para_id, - import_queue, - relay_chain_interface.clone(), - ) - .await?; - - let relay_chain_slot_duration = Duration::from_secs(6); let overseer_handle = relay_chain_interface .overseer_handle() .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - let sync_keystore = node_builder.keystore_container.keystore(); + let sync_keystore = keystore_container.keystore(); let collate_on_tanssi: Arc< dyn Fn() -> (CancellationToken, futures::channel::oneshot::Receiver<()>) + Send + Sync, > = Arc::new(move || { @@ -754,35 +750,13 @@ pub async fn start_solochain_node( panic!("Called collate_on_tanssi on solochain collator. 
This is unsupported and the runtime shouldn't allow this, it is a bug") }); - let announce_block = { - let sync_service = node_builder.network.sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let (mut node_builder, import_queue_service) = node_builder.extract_import_queue_service(); - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: node_builder.client.clone(), - announce_block: announce_block.clone(), - para_id: orchestrator_para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut node_builder.task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: node_builder.network.sync_service.clone(), - })?; - let orchestrator_chain_interface_builder = OrchestratorChainSolochainInterfaceBuilder { overseer_handle: overseer_handle.clone(), relay_chain_interface: relay_chain_interface.clone(), }; let orchestrator_chain_interface = orchestrator_chain_interface_builder.build(); + // Channel to send messages to start/stop container chains + let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel(); if validator { // Start task which detects para id assignment, and starts/stops container chains. 
@@ -790,80 +764,73 @@ pub async fn start_solochain_node( orchestrator_chain_interface.clone(), sync_keystore.clone(), cc_spawn_tx.clone(), - node_builder.task_manager.spawn_essential_handle(), + task_manager.spawn_essential_handle(), ); } - let sync_keystore = node_builder.keystore_container.keystore(); - - { - let (container_chain_cli, tokio_handle) = container_chain_config; - // If the orchestrator chain is running as a full-node, we start a full node for the - // container chain immediately, because only collator nodes detect their container chain - // assignment so otherwise it will never start. - if !validator { - if let Some(container_chain_para_id) = container_chain_cli.base.para_id { - // Spawn new container chain node - cc_spawn_tx - .send(CcSpawnMsg::UpdateAssignment { - current: Some(container_chain_para_id.into()), - next: Some(container_chain_para_id.into()), - }) - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - } + // If the orchestrator chain is running as a full-node, we start a full node for the + // container chain immediately, because only collator nodes detect their container chain + // assignment so otherwise it will never start. + if !validator { + if let Some(container_chain_para_id) = container_chain_cli.base.para_id { + // Spawn new container chain node + cc_spawn_tx + .send(CcSpawnMsg::UpdateAssignment { + current: Some(container_chain_para_id.into()), + next: Some(container_chain_para_id.into()), + }) + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; } + } - // Start container chain spawner task. This will start and stop container chains on demand. - let orchestrator_client = node_builder.client.clone(); - let orchestrator_tx_pool = node_builder.transaction_pool.clone(); - let spawn_handle = node_builder.task_manager.spawn_handle(); + // Start container chain spawner task. This will start and stop container chains on demand. 
+ let spawn_handle = task_manager.spawn_handle(); - let container_chain_spawner = ContainerChainSpawner { - params: ContainerChainSpawnParams { - orchestrator_chain_interface, - container_chain_cli, - tokio_handle, - chain_type, - relay_chain, - relay_chain_interface, - sync_keystore, - orchestrator_para_id, - collation_params: if validator { - Some(spawner::CollationParams { - // TODO: all these args must be solochain instead of orchestrator - orchestrator_client: orchestrator_client.clone(), - orchestrator_tx_pool, - orchestrator_para_id, - collator_key: collator_key - .expect("there should be a collator key if we're a validator"), - solochain: true, - }) - } else { - None - }, - spawn_handle, - data_preserver: false, + let container_chain_spawner = ContainerChainSpawner { + params: ContainerChainSpawnParams { + orchestrator_chain_interface, + container_chain_cli, + tokio_handle, + chain_type, + relay_chain, + relay_chain_interface, + sync_keystore, + orchestrator_para_id, + collation_params: if validator { + Some(spawner::CollationParams { + // TODO: all these args must be solochain instead of orchestrator + orchestrator_client: None, + orchestrator_tx_pool: None, + orchestrator_para_id, + collator_key: collator_key + .expect("there should be a collator key if we're a validator"), + solochain: true, + }) + } else { + None }, - state: Default::default(), - collate_on_tanssi, - collation_cancellation_constructs: None, - }; - let state = container_chain_spawner.state.clone(); + spawn_handle, + data_preserver: false, + }, + state: Default::default(), + collate_on_tanssi, + collation_cancellation_constructs: None, + }; + let state = container_chain_spawner.state.clone(); - node_builder.task_manager.spawn_essential_handle().spawn( - "container-chain-spawner-rx-loop", - None, - container_chain_spawner.rx_loop(cc_spawn_rx, validator, true), - ); + task_manager.spawn_essential_handle().spawn( + "container-chain-spawner-rx-loop", + None, + 
container_chain_spawner.rx_loop(cc_spawn_rx, validator, true), + ); - node_builder.task_manager.spawn_essential_handle().spawn( - "container-chain-spawner-debug-state", - None, - monitor::monitor_task(state), - ) - } + task_manager.spawn_essential_handle().spawn( + "container-chain-spawner-debug-state", + None, + monitor::monitor_task(state), + ); - Ok((node_builder.task_manager, node_builder.client)) + Ok(task_manager) } pub const SOFT_DEADLINE_PERCENT: sp_runtime::Percent = sp_runtime::Percent::from_percent(100); diff --git a/test/configs/zombieStarlight.json b/test/configs/zombieStarlight.json index 48a5a3c99..cc2e02392 100644 --- a/test/configs/zombieStarlight.json +++ b/test/configs/zombieStarlight.json @@ -60,10 +60,9 @@ "prometheus_port": 33102 }, { - "name": "Collator1000-01", - "command": "../target/release/tanssi-node", + "name": "Collator-01", + "command": "../target/release/tanssi-node solo-chain", "args": [ - "--solo-chain", "--no-hardware-benchmarks", "--database=paritydb", "--wasmtime-precompiled=wasm" @@ -71,10 +70,9 @@ "prometheus_port": 33102 }, { - "name": "Collator1000-02", - "command": "../target/release/tanssi-node", + "name": "Collator-02", + "command": "../target/release/tanssi-node solo-chain", "args": [ - "--solo-chain", "--no-hardware-benchmarks", "--database=paritydb", "--wasmtime-precompiled=wasm" @@ -82,10 +80,9 @@ "prometheus_port": 33102 }, { - "name": "Collator1000-03", - "command": "../target/release/tanssi-node", + "name": "Collator-03", + "command": "../target/release/tanssi-node solo-chain", "args": [ - "--solo-chain", "--no-hardware-benchmarks", "--database=paritydb", "--wasmtime-precompiled=wasm" @@ -93,10 +90,9 @@ "prometheus_port": 33102 }, { - "name": "Collator1000-04", - "command": "../target/release/tanssi-node", + "name": "Collator-04", + "command": "../target/release/tanssi-node solo-chain", "args": [ - "--solo-chain", "--no-hardware-benchmarks", "--database=paritydb", "--wasmtime-precompiled=wasm" @@ -104,10 +100,9 @@ 
"prometheus_port": 33102 }, { - "name": "Collator2000-01", - "command": "../target/release/tanssi-node", + "name": "Collator-05", + "command": "../target/release/tanssi-node solo-chain", "args": [ - "--solo-chain", "--no-hardware-benchmarks", "--database=paritydb", "--wasmtime-precompiled=wasm" @@ -115,10 +110,9 @@ "prometheus_port": 33102 }, { - "name": "Collator2000-02", - "command": "../target/release/tanssi-node", + "name": "Collator-06", + "command": "../target/release/tanssi-node solo-chain", "args": [ - "--solo-chain", "--no-hardware-benchmarks", "--database=paritydb", "--wasmtime-precompiled=wasm" diff --git a/test/scripts/build-spec-starlight.sh b/test/scripts/build-spec-starlight.sh index 4749b7d3b..be05fc12c 100755 --- a/test/scripts/build-spec-starlight.sh +++ b/test/scripts/build-spec-starlight.sh @@ -16,7 +16,7 @@ mkdir -p specs $BINARY_FOLDER/container-chain-simple-node build-spec --disable-default-bootnode --add-bootnode "/ip4/127.0.0.1/tcp/33049/ws/p2p/12D3KooWHVMhQDHBpj9vQmssgyfspYecgV6e3hH1dQVDUkUbCYC9" --parachain-id 2000 --raw > specs/single-container-template-container-2000.json $BINARY_FOLDER/container-chain-frontier-node build-spec --disable-default-bootnode --add-bootnode "/ip4/127.0.0.1/tcp/33050/ws/p2p/12D3KooWFGaw1rxB6MSuN3ucuBm7hMq5pBFJbEoqTyth4cG483Cc" --parachain-id 2001 --raw > specs/single-container-template-container-2001.json $BINARY_FOLDER/container-chain-simple-node build-spec --disable-default-bootnode --parachain-id 2002 --raw > specs/single-container-template-container-2002.json -$BINARY_FOLDER/tanssi-relay build-spec --chain starlight-local --add-container-chain specs/single-container-template-container-2000.json --add-container-chain specs/single-container-template-container-2001.json --invulnerable "Collator1000-01" --invulnerable "Collator1000-02" --invulnerable "Collator1000-03" --invulnerable "Collator1000-04" --invulnerable "Collator2000-01" --invulnerable "Collator2000-02" > specs/tanssi-relay.json 
+$BINARY_FOLDER/tanssi-relay build-spec --chain starlight-local --add-container-chain specs/single-container-template-container-2000.json --add-container-chain specs/single-container-template-container-2001.json --invulnerable "Collator-01" --invulnerable "Collator-02" --invulnerable "Collator-03" --invulnerable "Collator-04" --invulnerable "Collator-05" --invulnerable "Collator-06" > specs/tanssi-relay.json # Also need to build the genesis-state to be able to register the container 2002 later $BINARY_FOLDER/container-chain-simple-node export-genesis-state --chain specs/single-container-template-container-2002.json specs/para-2002-genesis-state diff --git a/test/suites/zombie-tanssi-relay/test-tanssi-relay.ts b/test/suites/zombie-tanssi-relay/test-tanssi-relay.ts index 967ce6cea..8657ca269 100644 --- a/test/suites/zombie-tanssi-relay/test-tanssi-relay.ts +++ b/test/suites/zombie-tanssi-relay/test-tanssi-relay.ts @@ -386,12 +386,12 @@ describeSuite({ timeout: 300000, test: async function () { const logs = [ - "/Collator1000-01.log", - "/Collator1000-02.log", - "/Collator1000-03.log", - "/Collator1000-04.log", - "/Collator2000-01.log", - "/Collator2000-02.log", + "/Collator-01.log", + "/Collator-02.log", + "/Collator-03.log", + "/Collator-04.log", + "/Collator-05.log", + "/Collator-06.log", ]; for (const log of logs) { const logFilePath = getTmpZombiePath() + log;