diff --git a/Cargo.lock b/Cargo.lock index 5f1b9f8c5..876717cf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3219,6 +3219,7 @@ dependencies = [ "impl-trait-for-tuples", "kate", "log", + "once_cell", "parity-scale-codec", "rs_merkle", "scale-info", diff --git a/README.md b/README.md index 76255c345..6d340e691 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,11 @@ You can sync to the chain using: - Warp mode: This is will download the latest state then all the blocks data. It's the fastest and recommended way to have a running node. Use `--sync warp` - Fast / Fast Unsafe: This is currently not supported since it does not download data needed for Avail specific computation. +### Unsafe sync +When importing blocks, their content goes through an additional check to make sure that the DA commitments are valid. +During initial sync, you can choose to ignore this check to increase the sync speed. This flag is compatible with any `sync` mode. +- `--unsafe-da-sync` + ## Generate test code coverage report diff --git a/node/src/chain_spec.rs b/node/src/chain_spec.rs index 7ea762c62..2a3c97555 100644 --- a/node/src/chain_spec.rs +++ b/node/src/chain_spec.rs @@ -239,7 +239,7 @@ pub(crate) mod tests { network, transaction_pool, .. - } = new_full_base(config, |_, _| ())?; + } = new_full_base(config, |_, _| (), false)?; Ok(sc_service_test::TestNetComponents::new( task_manager, client, diff --git a/node/src/cli.rs b/node/src/cli.rs index 01d3f00e1..e47765f5c 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -36,6 +36,10 @@ pub struct Cli { /// telemetry, if telemetry is enabled. #[arg(long)] pub no_hardware_benchmarks: bool, + + /// Disable checking commitment on imported block during sync + #[arg(long, conflicts_with_all = &["validator"])] + pub unsafe_da_sync: bool, } /// Possible subcommands of the main binary. 
diff --git a/node/src/command.rs b/node/src/command.rs index c938e1ba0..9280c2f3d 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -87,7 +87,7 @@ pub fn run() -> Result<()> { None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { - service::new_full(config, cli.no_hardware_benchmarks) + service::new_full(config, cli.no_hardware_benchmarks, cli.unsafe_da_sync) .map_err(sc_cli::Error::Service) }) }, @@ -117,7 +117,7 @@ pub fn run() -> Result<()> { unimplemented!(); /* // ensure that we keep the task manager alive - let partial = new_partial(&config)?; + let partial = new_partial(&config, cli.unsafe_da_sync)?; cmd.run(partial.client) */ }, @@ -131,7 +131,7 @@ pub fn run() -> Result<()> { unimplemented!(); /* // ensure that we keep the task manager alive - let partial = new_partial(&config)?; + let partial = new_partial(&config, cli.unsafe_da_sync)?; let db = partial.backend.expose_db(); let storage = partial.backend.expose_storage(); @@ -142,7 +142,7 @@ pub fn run() -> Result<()> { unimplemented!(); /* // ensure that we keep the task manager alive - let partial = new_partial(&config)?; + let partial = new_partial(&config, cli.unsafe_da_sync)?; let ext_builder = RemarkBuilder::new(partial.client.clone()); cmd.run( @@ -158,7 +158,7 @@ pub fn run() -> Result<()> { unimplemented!(); /* // ensure that we keep the task manager alive - let partial = service::new_partial(&config)?; + let partial = service::new_partial(&config, cli.unsafe_da_sync)?; // Register the *Remark* and *TKA* builders. let ext_factory = ExtrinsicFactory(vec![ Box::new(RemarkBuilder::new(partial.client.clone())), @@ -198,7 +198,7 @@ pub fn run() -> Result<()> { task_manager, import_queue, .. - } = new_partial(&config)?; + } = new_partial(&config, cli.unsafe_da_sync)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, @@ -209,7 +209,7 @@ pub fn run() -> Result<()> { client, task_manager, .. 
- } = new_partial(&config)?; + } = new_partial(&config, cli.unsafe_da_sync)?; Ok((cmd.run(client, config.database), task_manager)) }) }, @@ -220,7 +220,7 @@ pub fn run() -> Result<()> { client, task_manager, .. - } = new_partial(&config)?; + } = new_partial(&config, cli.unsafe_da_sync)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) }, @@ -232,7 +232,7 @@ pub fn run() -> Result<()> { task_manager, import_queue, .. - } = new_partial(&config)?; + } = new_partial(&config, cli.unsafe_da_sync)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, @@ -248,7 +248,7 @@ pub fn run() -> Result<()> { task_manager, backend, .. - } = new_partial(&config)?; + } = new_partial(&config, cli.unsafe_da_sync)?; let aux_revert = Box::new(|client: Arc, backend, blocks| { sc_consensus_babe::revert(client.clone(), backend, blocks)?; sc_consensus_grandpa::revert(client, blocks)?; diff --git a/node/src/da_block_import.rs b/node/src/da_block_import.rs index b16fd7250..02c361583 100644 --- a/node/src/da_block_import.rs +++ b/node/src/da_block_import.rs @@ -27,6 +27,8 @@ use sp_runtime::traits::Block as BlockT; pub struct BlockImport { pub client: Arc, pub inner: I, + // If true, it skips the DA block import check during sync only. 
+ pub unsafe_da_sync: bool, } impl Clone for BlockImport { @@ -34,6 +36,7 @@ impl Clone for BlockImport { Self { client: self.client.clone(), inner: self.inner.clone(), + unsafe_da_sync: self.unsafe_da_sync, } } } @@ -57,7 +60,17 @@ where block: BlockImportParams, ) -> Result { // We only want to check for blocks that are not from "Own" - if !matches!(block.origin, BlockOrigin::Own) { + let is_own = matches!(block.origin, BlockOrigin::Own); + + // We skip checks if we're syncing and unsafe_da_sync is true + let is_sync = matches!( + block.origin, + BlockOrigin::NetworkInitialSync | BlockOrigin::File + ); + let skip_sync = self.unsafe_da_sync && is_sync; + + let should_verify = !is_own && !skip_sync; + if should_verify { let no_extrinsics = vec![]; let extrinsics = block.body.as_ref().unwrap_or(&no_extrinsics); let best_hash = self.client.info().best_hash; diff --git a/node/src/service.rs b/node/src/service.rs index 971f3845d..1e6b0872e 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -161,6 +161,7 @@ pub fn create_extrinsic( #[allow(clippy::type_complexity)] pub fn new_partial( config: &Configuration, + unsafe_da_sync: bool, ) -> Result< sc_service::PartialComponents< FullClient, @@ -235,7 +236,8 @@ pub fn new_partial( grandpa_block_import, client.clone(), )?; - let da_block_import = BlockImport::new(client.clone(), block_import); + + let da_block_import = BlockImport::new(client.clone(), block_import, unsafe_da_sync); let slot_duration = babe_link.config().slot_duration(); let (import_queue, babe_worker_handle) = @@ -343,6 +345,7 @@ pub fn new_full_base( config: Configuration, disable_hardware_benchmarks: bool, with_startup_data: impl FnOnce(&BlockImport, &sc_consensus_babe::BabeLink), + unsafe_da_sync: bool, ) -> Result { let hwbench = if !disable_hardware_benchmarks { config.database.path().map(|database_path| { @@ -362,7 +365,7 @@ pub fn new_full_base( select_chain, transaction_pool, other: 
(rpc_builder, import_setup, rpc_setup, mut telemetry), - } = new_partial(&config)?; + } = new_partial(&config, unsafe_da_sync)?; let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; @@ -591,9 +594,15 @@ pub fn new_full_base( pub fn new_full( config: Configuration, disable_hardware_benchmarks: bool, + unsafe_da_sync: bool, ) -> Result { - new_full_base(config, disable_hardware_benchmarks, |_, _| ()) - .map(|NewFullBase { task_manager, .. }| task_manager) + new_full_base( + config, + disable_hardware_benchmarks, + |_, _| (), + unsafe_da_sync, + ) + .map(|NewFullBase { task_manager, .. }| task_manager) } fn extend_metrics(prometheus: &Registry) -> Result<(), PrometheusError> { @@ -676,6 +685,7 @@ mod tests { babe_link: &sc_consensus_babe::BabeLink| { setup_handles = Some((block_import.clone(), babe_link.clone())); }, + false, )?; let node = sc_service_test::TestNetComponents::new( @@ -869,7 +879,7 @@ mod tests { network, transaction_pool, .. - } = new_full_base(config, |_, _| ())?; + } = new_full_base(config, |_, _| (), false)?; Ok(sc_service_test::TestNetComponents::new( task_manager, client, diff --git a/pallets/mandate/src/mock.rs b/pallets/mandate/src/mock.rs index 788c18d78..2e783780d 100644 --- a/pallets/mandate/src/mock.rs +++ b/pallets/mandate/src/mock.rs @@ -103,6 +103,11 @@ where r => Err(OuterOrigin::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + unimplemented!() + } } /// Create new externalities for `Mandate` module tests. 
diff --git a/pallets/system/Cargo.toml b/pallets/system/Cargo.toml index 1c49bef3c..dc93c0fc7 100644 --- a/pallets/system/Cargo.toml +++ b/pallets/system/Cargo.toml @@ -39,6 +39,8 @@ sp-weights = { version = "20.0.0", default-features = false, features = ["serde" sp-runtime-interface = { version = "17", default-features = false } binary-merkle-tree = { version = "4.0.0-dev", default-features = false } +once_cell = { version = "1.18", optional = true } + [dev-dependencies] hex-literal = "0.3.1" test-case = "1.2.3" @@ -64,6 +66,7 @@ std = [ "sp-version/std", "sp-weights/std", "binary-merkle-tree/std", + "once_cell" ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/pallets/system/src/header_builder.rs b/pallets/system/src/header_builder.rs index e683d6b30..4425ae402 100644 --- a/pallets/system/src/header_builder.rs +++ b/pallets/system/src/header_builder.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "std")] -use avail_core::DataLookup; use avail_core::{header::HeaderExtension, traits::ExtendedHeader, AppExtrinsic}; use frame_support::traits::Randomness; pub use kate::{ @@ -127,21 +125,39 @@ pub fn build_extension( metrics: &M, ) -> HeaderExtension { use avail_core::header::extension::{v1, v2}; - - let (xts_layout, commitment, block_dims, _data_matrix) = kate::com::par_build_commitments( - block_length.rows, - block_length.cols, - block_length.chunk_size(), - app_extrinsics, + // TODO Marko Move to OnceLock https://doc.rust-lang.org/stable/std/sync/struct.OnceLock.html on sub-upgrade v1 branch + use once_cell::sync::Lazy; + static PMP: Lazy = + once_cell::sync::Lazy::new(|| kate::testnet::multiproof_params(256, 256)); + + let timer = std::time::Instant::now(); + let grid = kate::gridgen::EvaluationGrid::from_extrinsics( + app_extrinsics.to_vec(), + 4, //TODO: where should this minimum grid width be specified + block_length.cols.0.saturated_into(), // even if we run on a u16 target this is fine + block_length.rows.0.saturated_into(), seed, - metrics, ) - 
.expect("Build commitments cannot fail .qed"); - let app_lookup = DataLookup::from_id_and_len_iter(xts_layout.into_iter()) - .expect("Extrinsic size cannot overflow .qed"); - let rows = block_dims.rows().0.saturated_into::(); - let cols = block_dims.cols().0.saturated_into::(); - + .expect("Grid construction cannot fail"); + metrics.preparation_block_time(timer.elapsed()); + + let timer = std::time::Instant::now(); + use kate::gridgen::AsBytes; + let commitment = grid + .make_polynomial_grid() + .expect("Make polynomials cannot fail") + .extended_commitments(&*PMP, 2) + .expect("Extended commitments cannot fail") + .iter() + .flat_map(|c| c.to_bytes().expect("Commitment serialization cannot fail")) + .collect::>(); + metrics.commitment_build_time(timer.elapsed()); + + // Note that this uses the original dims, _not the extended ones_ + let rows = grid.dims().rows().get(); + let cols = grid.dims().cols().get(); + + let app_lookup = grid.lookup().clone(); // **NOTE:** Header extension V2 is not yet enable by default. if cfg!(feature = "header_extension_v2") { use avail_core::kate_commitment::v2::KateCommitment; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index cdf7c3772..d682c2ae4 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1183,7 +1183,8 @@ mod tests { >>::try_state(block, All)?; as TryState>::try_state(block, All)?; as TryState>::try_state(block, All)?; - as TryState>::try_state(block, All) + as TryState>::try_state(block, All)?; + Ok(()) } #[test]